Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/basis_universal/transcoder/basisu_containers.h
21349 views
1
// basisu_containers.h
2
#pragma once
3
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <algorithm>
#include <type_traits>
8
9
#if defined(__linux__) && !defined(ANDROID)
10
// Only for malloc_usable_size() in basisu_containers_impl.h
11
#include <malloc.h>
12
#define HAS_MALLOC_USABLE_SIZE 1
13
#endif
14
15
// Set to 1 to always check vector operator[], front(), and back() even in release.
16
#define BASISU_VECTOR_FORCE_CHECKING 0
17
18
// If 1, the vector container will not query the CRT to get the size of resized memory blocks.
19
#define BASISU_VECTOR_DETERMINISTIC 1
20
21
#ifdef _MSC_VER
22
#define BASISU_FORCE_INLINE __forceinline
23
#else
24
#define BASISU_FORCE_INLINE inline
25
#endif
26
27
#define BASISU_HASHMAP_TEST 0
28
29
namespace basisu
30
{
31
enum { cInvalidIndex = -1 };
32
33
// Clamps value into the inclusive range [low, high].
template <typename S> inline S clamp(S value, S low, S high)
{
    if (value < low)
        return low;
    if (value > high)
        return high;
    return value;
}

// Variadic-style max/min helpers built by folding the 2-argument form.
template <typename S> inline S maximum(S a, S b) { return (a > b) ? a : b; }
template <typename S> inline S maximum(S a, S b, S c) { return maximum(a, maximum(b, c)); }
template <typename S> inline S maximum(S a, S b, S c, S d) { return maximum(a, maximum(b, c, d)); }

template <typename S> inline S minimum(S a, S b) { return (a < b) ? a : b; }
template <typename S> inline S minimum(S a, S b, S c) { return minimum(a, minimum(b, c)); }
template <typename S> inline S minimum(S a, S b, S c, S d) { return minimum(a, minimum(b, c, d)); }
42
43
#ifdef _MSC_VER
44
__declspec(noreturn)
45
#else
46
[[noreturn]]
47
#endif
48
void container_abort(const char* pMsg, ...);
49
50
namespace helpers
{
    // True only for a nonzero value with exactly one bit set.
    inline bool is_power_of_2(uint32_t x) { return (x != 0U) && ((x & (x - 1U)) == 0U); }
    inline bool is_power_of_2(uint64_t x) { return (x != 0U) && ((x & (x - 1U)) == 0U); }

    // Reference-returning min/max (distinct from the by-value basisu::minimum/maximum).
    template<class T> const T& minimum(const T& a, const T& b) { return (b < a) ? b : a; }
    template<class T> const T& maximum(const T& a, const T& b) { return (a < b) ? b : a; }

    // floor(log2(v)); returns 0 for v <= 1.
    inline uint32_t floor_log2i(uint32_t v)
    {
        uint32_t result = 0;
        for (; v > 1U; v >>= 1)
            result++;
        return result;
    }

    inline uint32_t floor_log2i(uint64_t v)
    {
        uint32_t result = 0;
        for (; v > 1U; v >>= 1)
            result++;
        return result;
    }

    // Smallest power of two >= val. Note val == 0 wraps and yields 0,
    // matching the classic bit-smearing idiom.
    inline uint32_t next_pow2(uint32_t val)
    {
        val--;
        // Propagate the highest set bit into every lower position.
        for (uint32_t shift = 1; shift <= 16; shift <<= 1)
            val |= val >> shift;
        return val + 1;
    }

    inline uint64_t next_pow2(uint64_t val)
    {
        val--;
        for (uint32_t shift = 1; shift <= 32; shift <<= 1)
            val |= val >> shift;
        return val + 1;
    }
} // namespace helpers
103
104
// Placement-new construction/destruction helpers used by the containers below.
// None of these allocate or free memory; they only run constructors/destructors
// in storage the caller already owns.

// Default-constructs a T in the raw storage at p; returns p.
template <typename T>
inline T* construct(T* p)
{
    return new (static_cast<void*>(p)) T;
}

// Constructs a T from init in the raw storage at p; returns p.
template <typename T, typename U>
inline T* construct(T* p, const U& init)
{
    return new (static_cast<void*>(p)) T(init);
}

// Default-constructs n T's starting at p.
template <typename T>
inline void construct_array(T* p, size_t n)
{
    T* q = p + n;
    for (; p != q; ++p)
        new (static_cast<void*>(p)) T;
}

// Constructs n T's from init starting at p.
template <typename T, typename U>
inline void construct_array(T* p, size_t n, const U& init)
{
    T* q = p + n;
    for (; p != q; ++p)
        new (static_cast<void*>(p)) T(init);
}

// Runs T's destructor without freeing the storage.
template <typename T>
inline void destruct(T* p)
{
    p->~T();
}

// Destroys n T's starting at p.
template <typename T> inline void destruct_array(T* p, size_t n)
{
    T* q = p + n;
    for (; p != q; ++p)
        p->~T();
}
144
145
template<typename T>
146
struct scalar_type
147
{
148
enum { cFlag = false };
149
static inline void construct(T* p) { basisu::construct(p); }
150
static inline void construct(T* p, const T& init) { basisu::construct(p, init); }
151
static inline void construct_array(T* p, size_t n) { basisu::construct_array(p, n); }
152
static inline void destruct(T* p) { basisu::destruct(p); }
153
static inline void destruct_array(T* p, size_t n) { basisu::destruct_array(p, n); }
154
};
155
156
template<typename T> struct scalar_type<T*>
157
{
158
enum { cFlag = true };
159
static inline void construct(T** p) { memset(p, 0, sizeof(T*)); }
160
static inline void construct(T** p, T* init) { *p = init; }
161
static inline void construct_array(T** p, size_t n) { memset(p, 0, sizeof(T*) * n); }
162
static inline void destruct(T** p) { p; }
163
static inline void destruct_array(T** p, size_t n) { p, n; }
164
};
165
166
#define BASISU_DEFINE_BUILT_IN_TYPE(X) \
167
template<> struct scalar_type<X> { \
168
enum { cFlag = true }; \
169
static inline void construct(X* p) { memset(p, 0, sizeof(X)); } \
170
static inline void construct(X* p, const X& init) { memcpy(p, &init, sizeof(X)); } \
171
static inline void construct_array(X* p, size_t n) { memset(p, 0, sizeof(X) * n); } \
172
static inline void destruct(X* p) { p; } \
173
static inline void destruct_array(X* p, size_t n) { p, n; } };
174
175
BASISU_DEFINE_BUILT_IN_TYPE(bool)
176
BASISU_DEFINE_BUILT_IN_TYPE(char)
177
BASISU_DEFINE_BUILT_IN_TYPE(unsigned char)
178
BASISU_DEFINE_BUILT_IN_TYPE(short)
179
BASISU_DEFINE_BUILT_IN_TYPE(unsigned short)
180
BASISU_DEFINE_BUILT_IN_TYPE(int)
181
BASISU_DEFINE_BUILT_IN_TYPE(unsigned int)
182
BASISU_DEFINE_BUILT_IN_TYPE(long)
183
BASISU_DEFINE_BUILT_IN_TYPE(unsigned long)
184
#ifdef __GNUC__
185
BASISU_DEFINE_BUILT_IN_TYPE(long long)
186
BASISU_DEFINE_BUILT_IN_TYPE(unsigned long long)
187
#else
188
BASISU_DEFINE_BUILT_IN_TYPE(__int64)
189
BASISU_DEFINE_BUILT_IN_TYPE(unsigned __int64)
190
#endif
191
BASISU_DEFINE_BUILT_IN_TYPE(float)
192
BASISU_DEFINE_BUILT_IN_TYPE(double)
193
BASISU_DEFINE_BUILT_IN_TYPE(long double)
194
195
#undef BASISU_DEFINE_BUILT_IN_TYPE
196
197
// Trait: opt-in marker that T may be relocated with a raw memcpy (no
// self-referencing pointers, no registration side effects).
template<typename T>
struct bitwise_movable { enum { cFlag = false }; };

// Declares a type bitwise-movable.
#define BASISU_DEFINE_BITWISE_MOVABLE(Q) template<> struct bitwise_movable<Q> { enum { cFlag = true }; };

// Trait: opt-in marker that T may be duplicated with a raw memcpy.
template<typename T>
struct bitwise_copyable { enum { cFlag = false }; };

// Declares a type bitwise-copyable.
#define BASISU_DEFINE_BITWISE_COPYABLE(Q) template<> struct bitwise_copyable<Q> { enum { cFlag = true }; };

#define BASISU_IS_POD(T) __is_pod(T)

#define BASISU_IS_SCALAR_TYPE(T) (scalar_type<T>::cFlag)

// Pre-C++11-complete toolchains: fall back to the compiler builtin instead of
// std::is_trivially_copyable. NOTE(review): __GNUC__ < 5 also matches clang
// (which defines __GNUC__ == 4) - presumably that is the intended target, since
// clang provides the __is_trivially_copyable builtin; confirm for real gcc 4.x.
#if !defined(BASISU_HAVE_STD_TRIVIALLY_COPYABLE) && defined(__GNUC__) && (__GNUC__ < 5)
#define BASISU_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__)
#else
#define BASISU_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
#endif

// True if T may be copied with memcpy: scalar, POD, trivially copyable/trivial,
// or explicitly opted in via BASISU_DEFINE_BITWISE_COPYABLE.
// TODO: clean this up, it's still confusing (copying vs. movable).
#define BASISU_IS_BITWISE_COPYABLE(T) (BASISU_IS_SCALAR_TYPE(T) || BASISU_IS_POD(T) || BASISU_IS_TRIVIALLY_COPYABLE(T) || std::is_trivial<T>::value || (bitwise_copyable<T>::cFlag))

#define BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) (BASISU_IS_BITWISE_COPYABLE(T) || (bitwise_movable<T>::cFlag))

// True if T's destructor actually needs to run.
#define BASISU_HAS_DESTRUCTOR(T) ((!scalar_type<T>::cFlag) && (!__is_pod(T)) && (!std::is_trivially_destructible<T>::value))
223
224
// Classic pre-<type_traits> SFINAE class detector: the int U::* overload is
// viable only for class types (only classes can have pointers-to-member), and
// the two overloads return differently-sized reference-to-array types so the
// choice can be inspected with sizeof.
typedef char(&yes_t)[1];
typedef char(&no_t)[2];

template <class U> yes_t class_test(int U::*);
template <class U> no_t class_test(...);

template <class T> struct is_class
{
    enum { value = (sizeof(class_test<T>(0)) == sizeof(yes_t)) };
};

// Compile-time pointer detection via partial specialization.
template <typename T> struct is_pointer
{
    enum { value = false };
};

template <typename T> struct is_pointer<T*>
{
    enum { value = true };
};

// Stateless placeholder type (e.g. for containers that only need keys).
struct empty_type { };

BASISU_DEFINE_BITWISE_COPYABLE(empty_type);
BASISU_DEFINE_BITWISE_MOVABLE(empty_type);
249
250
// CRTP-style mixin: a class deriving from rel_ops<Itself> gets !=, >, <=, >=
// synthesized (as ADL-found friends) from its own operator== and operator<.
template<typename T> struct rel_ops
{
    friend bool operator!=(const T& x, const T& y) { return (!(x == y)); }
    friend bool operator> (const T& x, const T& y) { return (y < x); }
    friend bool operator<=(const T& x, const T& y) { return (!(y < x)); }
    friend bool operator>=(const T& x, const T& y) { return (!(x < y)); }
};
257
258
// Type-erased backing store layout shared by vector<T>: raw storage pointer,
// live element count, and allocated capacity (in elements).
struct elemental_vector
{
    void* m_p;
    size_t m_size;
    size_t m_capacity;

    // Callback that relocates num elements from pSrc to pDst during reallocation.
    typedef void (*object_mover)(void* pDst, void* pSrc, size_t num);

    // Grows storage to hold at least min_new_capacity elements of element_size
    // bytes, relocating existing elements via pRelocate. Defined out of line
    // (basisu_containers_impl.h); exact grow_hint/nofail semantics live there.
    bool increase_capacity(size_t min_new_capacity, bool grow_hint, size_t element_size, object_mover pRelocate, bool nofail);
};
268
269
// Returns true if a + b would overflow (wrap) a size_t.
inline bool add_overflow_check(size_t a, size_t b)
{
    // Equivalent to computing a + b and testing for wraparound,
    // but without performing the (well-defined) wrap.
    return b > (SIZE_MAX - a);
}
275
276
// Returns true if val is non-negative and exactly representable as a size_t
// (i.e. converting it to size_t loses nothing); false otherwise.
template<typename T>
inline bool can_fit_into_size_t(T val)
{
    static_assert(std::is_integral<T>::value, "T must be an integral type");

    if (val < 0)
        return false;

    // Round-trip through size_t: lossless iff the value fits.
    return static_cast<T>(static_cast<size_t>(val)) == val;
}
284
285
// Returns true if a*b would overflow a size_t.
inline bool mul_overflow_check(size_t a, size_t b)
{
    // Avoid the division on 32-bit platforms
    // (there the product always fits in 64 bits, so widen and range-check instead).
    if (sizeof(size_t) == sizeof(uint32_t))
        return !can_fit_into_size_t(static_cast<uint64_t>(a) * b);
    else
        return b && (a > (SIZE_MAX / b));
}
294
295
template<typename T>
296
class writable_span;
297
298
template<typename T>
299
class readable_span
300
{
301
public:
302
using value_type = T;
303
using size_type = size_t;
304
using const_pointer = const T*;
305
using const_reference = const T&;
306
using const_iterator = const T*;
307
308
inline readable_span() :
309
m_p(nullptr),
310
m_size(0)
311
{
312
}
313
314
inline readable_span(const writable_span<T>& other);
315
inline readable_span& operator= (const writable_span<T>& rhs);
316
317
inline readable_span(const_pointer p, size_t n)
318
{
319
set(p, n);
320
}
321
322
inline readable_span(const_pointer s, const_pointer e)
323
{
324
set(s, e);
325
}
326
327
inline readable_span(const readable_span& other) :
328
m_p(other.m_p),
329
m_size(other.m_size)
330
{
331
assert(!m_size || m_p);
332
}
333
334
inline readable_span(readable_span&& other) :
335
m_p(other.m_p),
336
m_size(other.m_size)
337
{
338
assert(!m_size || m_p);
339
340
other.m_p = nullptr;
341
other.m_size = 0;
342
}
343
344
template <size_t N>
345
inline readable_span(const T(&arr)[N]) :
346
m_p(arr),
347
m_size(N)
348
{
349
}
350
351
template <size_t N>
352
inline readable_span& set(const T(&arr)[N])
353
{
354
m_p = arr;
355
m_size = N;
356
return *this;
357
}
358
359
inline readable_span& set(const_pointer p, size_t n)
360
{
361
if (!p && n)
362
{
363
assert(0);
364
m_p = nullptr;
365
m_size = 0;
366
}
367
else
368
{
369
m_p = p;
370
m_size = n;
371
}
372
373
return *this;
374
}
375
376
inline readable_span& set(const_pointer s, const_pointer e)
377
{
378
if ((e < s) || (!s && e))
379
{
380
assert(0);
381
m_p = nullptr;
382
m_size = 0;
383
}
384
else
385
{
386
m_p = s;
387
m_size = e - s;
388
}
389
390
return *this;
391
}
392
393
inline bool operator== (const readable_span& rhs) const
394
{
395
return (m_p == rhs.m_p) && (m_size == rhs.m_size);
396
}
397
398
inline bool operator!= (const readable_span& rhs) const
399
{
400
return (m_p != rhs.m_p) || (m_size != rhs.m_size);
401
}
402
403
// only true if the region is totally inside the span
404
inline bool is_inside_ptr(const_pointer p, size_t n) const
405
{
406
if (!is_valid())
407
{
408
assert(0);
409
return false;
410
}
411
412
if (!p)
413
{
414
assert(!n);
415
return false;
416
}
417
418
return (p >= m_p) && ((p + n) <= end());
419
}
420
421
inline bool is_inside(size_t ofs, size_t size) const
422
{
423
if (add_overflow_check(ofs, size))
424
{
425
assert(0);
426
return false;
427
}
428
429
if (!is_valid())
430
{
431
assert(0);
432
return false;
433
}
434
435
if ((ofs + size) > m_size)
436
return false;
437
438
return true;
439
}
440
441
inline readable_span subspan(size_t ofs, size_t n) const
442
{
443
if (!is_valid())
444
{
445
assert(0);
446
return readable_span((const_pointer)nullptr, (size_t)0);
447
}
448
449
if (add_overflow_check(ofs, n))
450
{
451
assert(0);
452
return readable_span((const_pointer)nullptr, (size_t)0);
453
}
454
455
if ((ofs + n) > m_size)
456
{
457
assert(0);
458
return readable_span((const_pointer)nullptr, (size_t)0);
459
}
460
461
return readable_span(m_p + ofs, n);
462
}
463
464
void clear()
465
{
466
m_p = nullptr;
467
m_size = 0;
468
}
469
470
inline bool empty() const { return !m_size; }
471
472
// true if the span is non-nullptr and is not empty
473
inline bool is_valid() const { return m_p && m_size; }
474
475
inline bool is_nullptr() const { return m_p == nullptr; }
476
477
inline size_t size() const { return m_size; }
478
inline size_t size_in_bytes() const { assert(can_fit_into_size_t((uint64_t)m_size * sizeof(T))); return m_size * sizeof(T); }
479
480
inline const_pointer get_ptr() const { return m_p; }
481
482
inline const_iterator begin() const { return m_p; }
483
inline const_iterator end() const { assert(m_p || !m_size); return m_p + m_size; }
484
485
inline const_iterator cbegin() const { return m_p; }
486
inline const_iterator cend() const { assert(m_p || !m_size); return m_p + m_size; }
487
488
inline const_reference front() const
489
{
490
if (!(m_p && m_size))
491
container_abort("readable_span invalid\n");
492
493
return m_p[0];
494
}
495
496
inline const_reference back() const
497
{
498
if (!(m_p && m_size))
499
container_abort("readable_span invalid\n");
500
501
return m_p[m_size - 1];
502
}
503
504
inline readable_span& operator= (const readable_span& rhs)
505
{
506
m_p = rhs.m_p;
507
m_size = rhs.m_size;
508
return *this;
509
}
510
511
inline readable_span& operator= (readable_span&& rhs)
512
{
513
if (this != &rhs)
514
{
515
m_p = rhs.m_p;
516
m_size = rhs.m_size;
517
rhs.m_p = nullptr;
518
rhs.m_size = 0;
519
}
520
521
return *this;
522
}
523
524
inline const_reference operator* () const
525
{
526
if (!(m_p && m_size))
527
container_abort("readable_span invalid\n");
528
529
return *m_p;
530
}
531
532
inline const_pointer operator-> () const
533
{
534
if (!(m_p && m_size))
535
container_abort("readable_span invalid\n");
536
537
return m_p;
538
}
539
540
inline readable_span& remove_prefix(size_t n)
541
{
542
if ((!m_p) || (n > m_size))
543
{
544
assert(0);
545
return *this;
546
}
547
548
m_p += n;
549
m_size -= n;
550
return *this;
551
}
552
553
inline readable_span& remove_suffix(size_t n)
554
{
555
if ((!m_p) || (n > m_size))
556
{
557
assert(0);
558
return *this;
559
}
560
561
m_size -= n;
562
return *this;
563
}
564
565
inline readable_span& enlarge(size_t n)
566
{
567
if (!m_p)
568
{
569
assert(0);
570
return *this;
571
}
572
573
if (add_overflow_check(m_size, n))
574
{
575
assert(0);
576
return *this;
577
}
578
579
m_size += n;
580
return *this;
581
}
582
583
bool copy_from(size_t src_ofs, size_t src_size, T* pDst, size_t dst_ofs) const
584
{
585
if (!src_size)
586
return true;
587
588
if (!pDst)
589
{
590
assert(0);
591
return false;
592
}
593
594
if (!is_inside(src_ofs, src_size))
595
{
596
assert(0);
597
return false;
598
}
599
600
const_pointer pS = m_p + src_ofs;
601
602
if (BASISU_IS_BITWISE_COPYABLE(T))
603
{
604
const uint64_t num_bytes = (uint64_t)src_size * sizeof(T);
605
606
if (!can_fit_into_size_t(num_bytes))
607
{
608
assert(0);
609
return false;
610
}
611
612
memcpy(pDst, pS, (size_t)num_bytes);
613
}
614
else
615
{
616
T* pD = pDst + dst_ofs;
617
T* pDst_end = pD + src_size;
618
619
while (pD != pDst_end)
620
*pD++ = *pS++;
621
}
622
623
return true;
624
}
625
626
inline const_reference operator[] (size_t idx) const
627
{
628
if ((!is_valid()) || (idx >= m_size))
629
container_abort("readable_span: invalid span or index\n");
630
631
return m_p[idx];
632
}
633
634
inline uint16_t read_le16(size_t ofs) const
635
{
636
static_assert(sizeof(T) == 1, "T must be byte size");
637
638
if (!is_inside(ofs, sizeof(uint16_t)))
639
{
640
assert(0);
641
return false;
642
}
643
644
const uint8_t a = (uint8_t)m_p[ofs];
645
const uint8_t b = (uint8_t)m_p[ofs + 1];
646
return a | (b << 8u);
647
}
648
649
template<typename R>
650
inline R read_val(size_t ofs) const
651
{
652
static_assert(sizeof(T) == 1, "T must be byte size");
653
654
if (!is_inside(ofs, sizeof(R)))
655
{
656
assert(0);
657
return (R)0;
658
}
659
660
return *reinterpret_cast<const R*>(&m_p[ofs]);
661
}
662
663
inline uint16_t read_be16(size_t ofs) const
664
{
665
static_assert(sizeof(T) == 1, "T must be byte size");
666
667
if (!is_inside(ofs, sizeof(uint16_t)))
668
{
669
assert(0);
670
return 0;
671
}
672
673
const uint8_t b = (uint8_t)m_p[ofs];
674
const uint8_t a = (uint8_t)m_p[ofs + 1];
675
return a | (b << 8u);
676
}
677
678
inline uint32_t read_le32(size_t ofs) const
679
{
680
static_assert(sizeof(T) == 1, "T must be byte size");
681
682
if (!is_inside(ofs, sizeof(uint32_t)))
683
{
684
assert(0);
685
return 0;
686
}
687
688
const uint8_t a = (uint8_t)m_p[ofs];
689
const uint8_t b = (uint8_t)m_p[ofs + 1];
690
const uint8_t c = (uint8_t)m_p[ofs + 2];
691
const uint8_t d = (uint8_t)m_p[ofs + 3];
692
return a | (b << 8u) | (c << 16u) | (d << 24u);
693
}
694
695
inline uint32_t read_be32(size_t ofs) const
696
{
697
static_assert(sizeof(T) == 1, "T must be byte size");
698
699
if (!is_inside(ofs, sizeof(uint32_t)))
700
{
701
assert(0);
702
return 0;
703
}
704
705
const uint8_t d = (uint8_t)m_p[ofs];
706
const uint8_t c = (uint8_t)m_p[ofs + 1];
707
const uint8_t b = (uint8_t)m_p[ofs + 2];
708
const uint8_t a = (uint8_t)m_p[ofs + 3];
709
return a | (b << 8u) | (c << 16u) | (d << 24u);
710
}
711
712
inline uint64_t read_le64(size_t ofs) const
713
{
714
if (!add_overflow_check(ofs, sizeof(uint64_t)))
715
{
716
assert(0);
717
return 0;
718
}
719
const uint64_t l = read_le32(ofs);
720
const uint64_t h = read_le32(ofs + sizeof(uint32_t));
721
return l | (h << 32u);
722
}
723
724
inline uint64_t read_be64(size_t ofs) const
725
{
726
if (!add_overflow_check(ofs, sizeof(uint64_t)))
727
{
728
assert(0);
729
return 0;
730
}
731
const uint64_t h = read_be32(ofs);
732
const uint64_t l = read_be32(ofs + sizeof(uint32_t));
733
return l | (h << 32u);
734
}
735
736
private:
737
const_pointer m_p;
738
size_t m_size;
739
};
740
741
template<typename T>
742
class writable_span
743
{
744
friend readable_span<T>;
745
746
public:
747
using value_type = T;
748
using size_type = size_t;
749
using const_pointer = const T*;
750
using const_reference = const T&;
751
using const_iterator = const T*;
752
using pointer = T*;
753
using reference = T&;
754
using iterator = T*;
755
756
inline writable_span() :
757
m_p(nullptr),
758
m_size(0)
759
{
760
}
761
762
inline writable_span(T* p, size_t n)
763
{
764
set(p, n);
765
}
766
767
inline writable_span(T* s, T* e)
768
{
769
set(s, e);
770
}
771
772
inline writable_span(const writable_span& other) :
773
m_p(other.m_p),
774
m_size(other.m_size)
775
{
776
assert(!m_size || m_p);
777
}
778
779
inline writable_span(writable_span&& other) :
780
m_p(other.m_p),
781
m_size(other.m_size)
782
{
783
assert(!m_size || m_p);
784
785
other.m_p = nullptr;
786
other.m_size = 0;
787
}
788
789
template <size_t N>
790
inline writable_span(T(&arr)[N]) :
791
m_p(arr),
792
m_size(N)
793
{
794
}
795
796
readable_span<T> get_readable_span() const
797
{
798
return readable_span<T>(m_p, m_size);
799
}
800
801
template <size_t N>
802
inline writable_span& set(T(&arr)[N])
803
{
804
m_p = arr;
805
m_size = N;
806
return *this;
807
}
808
809
inline writable_span& set(T* p, size_t n)
810
{
811
if (!p && n)
812
{
813
assert(0);
814
m_p = nullptr;
815
m_size = 0;
816
}
817
else
818
{
819
m_p = p;
820
m_size = n;
821
}
822
823
return *this;
824
}
825
826
inline writable_span& set(T* s, T* e)
827
{
828
if ((e < s) || (!s && e))
829
{
830
assert(0);
831
m_p = nullptr;
832
m_size = 0;
833
}
834
else
835
{
836
m_p = s;
837
m_size = e - s;
838
}
839
840
return *this;
841
}
842
843
inline bool operator== (const writable_span& rhs) const
844
{
845
return (m_p == rhs.m_p) && (m_size == rhs.m_size);
846
}
847
848
inline bool operator== (const readable_span<T>& rhs) const
849
{
850
return (m_p == rhs.m_p) && (m_size == rhs.m_size);
851
}
852
853
inline bool operator!= (const writable_span& rhs) const
854
{
855
return (m_p != rhs.m_p) || (m_size != rhs.m_size);
856
}
857
858
inline bool operator!= (const readable_span<T>& rhs) const
859
{
860
return (m_p != rhs.m_p) || (m_size != rhs.m_size);
861
}
862
863
// only true if the region is totally inside the span
864
inline bool is_inside_ptr(const_pointer p, size_t n) const
865
{
866
if (!is_valid())
867
{
868
assert(0);
869
return false;
870
}
871
872
if (!p)
873
{
874
assert(!n);
875
return false;
876
}
877
878
return (p >= m_p) && ((p + n) <= end());
879
}
880
881
inline bool is_inside(size_t ofs, size_t size) const
882
{
883
if (add_overflow_check(ofs, size))
884
{
885
assert(0);
886
return false;
887
}
888
889
if (!is_valid())
890
{
891
assert(0);
892
return false;
893
}
894
895
if ((ofs + size) > m_size)
896
return false;
897
898
return true;
899
}
900
901
inline writable_span subspan(size_t ofs, size_t n) const
902
{
903
if (!is_valid())
904
{
905
assert(0);
906
return writable_span((T*)nullptr, (size_t)0);
907
}
908
909
if (add_overflow_check(ofs, n))
910
{
911
assert(0);
912
return writable_span((T*)nullptr, (size_t)0);
913
}
914
915
if ((ofs + n) > m_size)
916
{
917
assert(0);
918
return writable_span((T*)nullptr, (size_t)0);
919
}
920
921
return writable_span(m_p + ofs, n);
922
}
923
924
void clear()
925
{
926
m_p = nullptr;
927
m_size = 0;
928
}
929
930
inline bool empty() const { return !m_size; }
931
932
// true if the span is non-nullptr and is not empty
933
inline bool is_valid() const { return m_p && m_size; }
934
935
inline bool is_nullptr() const { return m_p == nullptr; }
936
937
inline size_t size() const { return m_size; }
938
inline size_t size_in_bytes() const { assert(can_fit_into_size_t((uint64_t)m_size * sizeof(T))); return m_size * sizeof(T); }
939
940
inline T* get_ptr() const { return m_p; }
941
942
inline iterator begin() const { return m_p; }
943
inline iterator end() const { assert(m_p || !m_size); return m_p + m_size; }
944
945
inline const_iterator cbegin() const { return m_p; }
946
inline const_iterator cend() const { assert(m_p || !m_size); return m_p + m_size; }
947
948
inline T& front() const
949
{
950
if (!(m_p && m_size))
951
container_abort("writable_span invalid\n");
952
953
return m_p[0];
954
}
955
956
inline T& back() const
957
{
958
if (!(m_p && m_size))
959
container_abort("writable_span invalid\n");
960
961
return m_p[m_size - 1];
962
}
963
964
inline writable_span& operator= (const writable_span& rhs)
965
{
966
m_p = rhs.m_p;
967
m_size = rhs.m_size;
968
return *this;
969
}
970
971
inline writable_span& operator= (writable_span&& rhs)
972
{
973
if (this != &rhs)
974
{
975
m_p = rhs.m_p;
976
m_size = rhs.m_size;
977
rhs.m_p = nullptr;
978
rhs.m_size = 0;
979
}
980
981
return *this;
982
}
983
984
inline T& operator* () const
985
{
986
if (!(m_p && m_size))
987
container_abort("writable_span invalid\n");
988
989
return *m_p;
990
}
991
992
inline T* operator-> () const
993
{
994
if (!(m_p && m_size))
995
container_abort("writable_span invalid\n");
996
997
return m_p;
998
}
999
1000
inline bool set_all(size_t ofs, size_t size, const_reference val)
1001
{
1002
if (!size)
1003
return true;
1004
1005
if (!is_inside(ofs, size))
1006
{
1007
assert(0);
1008
return false;
1009
}
1010
1011
T* pDst = m_p + ofs;
1012
1013
if ((sizeof(T) == sizeof(uint8_t)) && (BASISU_IS_BITWISE_COPYABLE(T)))
1014
{
1015
memset(pDst, (int)((uint8_t)val), size);
1016
}
1017
else
1018
{
1019
1020
T* pDst_end = pDst + size;
1021
1022
while (pDst != pDst_end)
1023
*pDst++ = val;
1024
}
1025
1026
return true;
1027
}
1028
1029
inline bool set_all(const_reference val)
1030
{
1031
return set_all(0, m_size, val);
1032
}
1033
1034
inline writable_span& remove_prefix(size_t n)
1035
{
1036
if ((!m_p) || (n > m_size))
1037
{
1038
assert(0);
1039
return *this;
1040
}
1041
1042
m_p += n;
1043
m_size -= n;
1044
return *this;
1045
}
1046
1047
inline writable_span& remove_suffix(size_t n)
1048
{
1049
if ((!m_p) || (n > m_size))
1050
{
1051
assert(0);
1052
return *this;
1053
}
1054
1055
m_size -= n;
1056
return *this;
1057
}
1058
1059
inline writable_span& enlarge(size_t n)
1060
{
1061
if (!m_p)
1062
{
1063
assert(0);
1064
return *this;
1065
}
1066
1067
if (add_overflow_check(m_size, n))
1068
{
1069
assert(0);
1070
return *this;
1071
}
1072
1073
m_size += n;
1074
return *this;
1075
}
1076
1077
// copy from this span to the destination ptr
1078
bool copy_from(size_t src_ofs, size_t src_size, T* pDst, size_t dst_ofs) const
1079
{
1080
if (!src_size)
1081
return true;
1082
1083
if (!pDst)
1084
{
1085
assert(0);
1086
return false;
1087
}
1088
1089
if (!is_inside(src_ofs, src_size))
1090
{
1091
assert(0);
1092
return false;
1093
}
1094
1095
const_pointer pS = m_p + src_ofs;
1096
1097
if (BASISU_IS_BITWISE_COPYABLE(T))
1098
{
1099
const uint64_t num_bytes = (uint64_t)src_size * sizeof(T);
1100
1101
if (!can_fit_into_size_t(num_bytes))
1102
{
1103
assert(0);
1104
return false;
1105
}
1106
1107
memcpy(pDst, pS, (size_t)num_bytes);
1108
}
1109
else
1110
{
1111
T* pD = pDst + dst_ofs;
1112
T* pDst_end = pD + src_size;
1113
1114
while (pD != pDst_end)
1115
*pD++ = *pS++;
1116
}
1117
1118
return true;
1119
}
1120
1121
// copy from the source ptr into this span
1122
bool copy_into(const_pointer pSrc, size_t src_ofs, size_t src_size, size_t dst_ofs) const
1123
{
1124
if (!src_size)
1125
return true;
1126
1127
if (!pSrc)
1128
{
1129
assert(0);
1130
return false;
1131
}
1132
1133
if (add_overflow_check(src_ofs, src_size) || add_overflow_check(dst_ofs, src_size))
1134
{
1135
assert(0);
1136
return false;
1137
}
1138
1139
if (!is_valid())
1140
{
1141
assert(0);
1142
return false;
1143
}
1144
1145
if (!is_inside(dst_ofs, src_size))
1146
{
1147
assert(0);
1148
return false;
1149
}
1150
1151
const_pointer pS = pSrc + src_ofs;
1152
T* pD = m_p + dst_ofs;
1153
1154
if (BASISU_IS_BITWISE_COPYABLE(T))
1155
{
1156
const uint64_t num_bytes = (uint64_t)src_size * sizeof(T);
1157
1158
if (!can_fit_into_size_t(num_bytes))
1159
{
1160
assert(0);
1161
return false;
1162
}
1163
1164
memcpy(pD, pS, (size_t)num_bytes);
1165
}
1166
else
1167
{
1168
T* pDst_end = pD + src_size;
1169
1170
while (pD != pDst_end)
1171
*pD++ = *pS++;
1172
}
1173
1174
return true;
1175
}
1176
1177
// copy from a source span into this span
1178
bool copy_into(const readable_span<T>& src, size_t src_ofs, size_t src_size, size_t dst_ofs) const
1179
{
1180
if (!src.is_inside(src_ofs, src_size))
1181
{
1182
assert(0);
1183
return false;
1184
}
1185
1186
return copy_into(src.get_ptr(), src_ofs, src_size, dst_ofs);
1187
}
1188
1189
// copy from a source span into this span
1190
bool copy_into(const writable_span& src, size_t src_ofs, size_t src_size, size_t dst_ofs) const
1191
{
1192
if (!src.is_inside(src_ofs, src_size))
1193
{
1194
assert(0);
1195
return false;
1196
}
1197
1198
return copy_into(src.get_ptr(), src_ofs, src_size, dst_ofs);
1199
}
1200
1201
inline T& operator[] (size_t idx) const
1202
{
1203
if ((!is_valid()) || (idx >= m_size))
1204
container_abort("writable_span: invalid span or index\n");
1205
1206
return m_p[idx];
1207
}
1208
1209
template<typename R>
1210
inline R read_val(size_t ofs) const
1211
{
1212
static_assert(sizeof(T) == 1, "T must be byte size");
1213
1214
if (!is_inside(ofs, sizeof(R)))
1215
{
1216
assert(0);
1217
return (R)0;
1218
}
1219
1220
return *reinterpret_cast<const R*>(&m_p[ofs]);
1221
}
1222
1223
template<typename R>
1224
inline bool write_val(size_t ofs, R val) const
1225
{
1226
static_assert(sizeof(T) == 1, "T must be byte size");
1227
1228
if (!is_inside(ofs, sizeof(R)))
1229
{
1230
assert(0);
1231
return false;
1232
}
1233
1234
*reinterpret_cast<R*>(&m_p[ofs]) = val;
1235
return true;
1236
}
1237
1238
inline bool write_le16(size_t ofs, uint16_t val) const
1239
{
1240
static_assert(sizeof(T) == 1, "T must be byte size");
1241
1242
if (!is_inside(ofs, sizeof(uint16_t)))
1243
{
1244
assert(0);
1245
return false;
1246
}
1247
1248
m_p[ofs] = (uint8_t)val;
1249
m_p[ofs + 1] = (uint8_t)(val >> 8u);
1250
return true;
1251
}
1252
1253
inline bool write_be16(size_t ofs, uint16_t val) const
1254
{
1255
static_assert(sizeof(T) == 1, "T must be byte size");
1256
1257
if (!is_inside(ofs, sizeof(uint16_t)))
1258
{
1259
assert(0);
1260
return false;
1261
}
1262
1263
m_p[ofs + 1] = (uint8_t)val;
1264
m_p[ofs] = (uint8_t)(val >> 8u);
1265
return true;
1266
}
1267
1268
inline bool write_le32(size_t ofs, uint32_t val) const
1269
{
1270
static_assert(sizeof(T) == 1, "T must be byte size");
1271
1272
if (!is_inside(ofs, sizeof(uint32_t)))
1273
{
1274
assert(0);
1275
return false;
1276
}
1277
1278
m_p[ofs] = (uint8_t)val;
1279
m_p[ofs + 1] = (uint8_t)(val >> 8u);
1280
m_p[ofs + 2] = (uint8_t)(val >> 16u);
1281
m_p[ofs + 3] = (uint8_t)(val >> 24u);
1282
return true;
1283
}
1284
1285
inline bool write_be32(size_t ofs, uint32_t val) const
1286
{
1287
static_assert(sizeof(T) == 1, "T must be byte size");
1288
1289
if (!is_inside(ofs, sizeof(uint32_t)))
1290
{
1291
assert(0);
1292
return false;
1293
}
1294
1295
m_p[ofs + 3] = (uint8_t)val;
1296
m_p[ofs + 2] = (uint8_t)(val >> 8u);
1297
m_p[ofs + 1] = (uint8_t)(val >> 16u);
1298
m_p[ofs] = (uint8_t)(val >> 24u);
1299
return true;
1300
}
1301
1302
inline bool write_le64(size_t ofs, uint64_t val) const
1303
{
1304
if (!add_overflow_check(ofs, sizeof(uint64_t)))
1305
{
1306
assert(0);
1307
return false;
1308
}
1309
1310
return write_le32(ofs, (uint32_t)val) && write_le32(ofs + sizeof(uint32_t), (uint32_t)(val >> 32u));
1311
}
1312
1313
inline bool write_be64(size_t ofs, uint64_t val) const
1314
{
1315
if (!add_overflow_check(ofs, sizeof(uint64_t)))
1316
{
1317
assert(0);
1318
return false;
1319
}
1320
1321
return write_be32(ofs + sizeof(uint32_t), (uint32_t)val) && write_be32(ofs, (uint32_t)(val >> 32u));
1322
}
1323
1324
inline uint16_t read_le16(size_t ofs) const
1325
{
1326
static_assert(sizeof(T) == 1, "T must be byte size");
1327
1328
if (!is_inside(ofs, sizeof(uint16_t)))
1329
{
1330
assert(0);
1331
return 0;
1332
}
1333
1334
const uint8_t a = (uint8_t)m_p[ofs];
1335
const uint8_t b = (uint8_t)m_p[ofs + 1];
1336
return a | (b << 8u);
1337
}
1338
1339
inline uint16_t read_be16(size_t ofs) const
1340
{
1341
static_assert(sizeof(T) == 1, "T must be byte size");
1342
1343
if (!is_inside(ofs, sizeof(uint16_t)))
1344
{
1345
assert(0);
1346
return 0;
1347
}
1348
1349
const uint8_t b = (uint8_t)m_p[ofs];
1350
const uint8_t a = (uint8_t)m_p[ofs + 1];
1351
return a | (b << 8u);
1352
}
1353
1354
inline uint32_t read_le32(size_t ofs) const
1355
{
1356
static_assert(sizeof(T) == 1, "T must be byte size");
1357
1358
if (!is_inside(ofs, sizeof(uint32_t)))
1359
{
1360
assert(0);
1361
return 0;
1362
}
1363
1364
const uint8_t a = (uint8_t)m_p[ofs];
1365
const uint8_t b = (uint8_t)m_p[ofs + 1];
1366
const uint8_t c = (uint8_t)m_p[ofs + 2];
1367
const uint8_t d = (uint8_t)m_p[ofs + 3];
1368
return a | (b << 8u) | (c << 16u) | (d << 24u);
1369
}
1370
1371
inline uint32_t read_be32(size_t ofs) const
1372
{
1373
static_assert(sizeof(T) == 1, "T must be byte size");
1374
1375
if (!is_inside(ofs, sizeof(uint32_t)))
1376
{
1377
assert(0);
1378
return 0;
1379
}
1380
1381
const uint8_t d = (uint8_t)m_p[ofs];
1382
const uint8_t c = (uint8_t)m_p[ofs + 1];
1383
const uint8_t b = (uint8_t)m_p[ofs + 2];
1384
const uint8_t a = (uint8_t)m_p[ofs + 3];
1385
return a | (b << 8u) | (c << 16u) | (d << 24u);
1386
}
1387
1388
inline uint64_t read_le64(size_t ofs) const
1389
{
1390
if (!add_overflow_check(ofs, sizeof(uint64_t)))
1391
{
1392
assert(0);
1393
return 0;
1394
}
1395
const uint64_t l = read_le32(ofs);
1396
const uint64_t h = read_le32(ofs + sizeof(uint32_t));
1397
return l | (h << 32u);
1398
}
1399
1400
inline uint64_t read_be64(size_t ofs) const
1401
{
1402
if (!add_overflow_check(ofs, sizeof(uint64_t)))
1403
{
1404
assert(0);
1405
return 0;
1406
}
1407
const uint64_t h = read_be32(ofs);
1408
const uint64_t l = read_be32(ofs + sizeof(uint32_t));
1409
return l | (h << 32u);
1410
}
1411
1412
private:
1413
T* m_p;
1414
size_t m_size;
1415
};
1416
1417
// Converting constructor: any writable span can also be viewed as a readable span over
// the same memory. Defined out-of-line because writable_span is declared after
// readable_span.
template<typename T>
inline readable_span<T>::readable_span(const writable_span<T>& other) :
	m_p(other.m_p),
	m_size(other.m_size)
{
}
1423
1424
// Rebinds this readable span to view the memory of a writable span.
// Only the pointer/size pair is copied; no element data is touched.
template<typename T>
inline readable_span<T>& readable_span<T>::operator= (const writable_span<T>& rhs)
{
	m_p = rhs.m_p;
	m_size = rhs.m_size;
	return *this;
}
1431
1432
template<typename T>
1433
inline bool span_copy(const writable_span<T>& dst, const readable_span<T>& src)
1434
{
1435
return dst.copy_into(src, 0, src.size(), 0);
1436
}
1437
1438
template<typename T>
1439
inline bool span_copy(const writable_span<T>& dst, const writable_span<T>& src)
1440
{
1441
return dst.copy_into(src, 0, src.size(), 0);
1442
}
1443
1444
template<typename T>
1445
inline bool span_copy(const writable_span<T>& dst, size_t dst_ofs, const writable_span<T>& src, size_t src_ofs, size_t len)
1446
{
1447
return dst.copy_into(src, src_ofs, len, dst_ofs);
1448
}
1449
1450
template<typename T>
1451
inline bool span_copy(const writable_span<T>& dst, size_t dst_ofs, const readable_span<T>& src, size_t src_ofs, size_t len)
1452
{
1453
return dst.copy_into(src, src_ofs, len, dst_ofs);
1454
}
1455
1456
template<typename T>
1457
class vector : public rel_ops< vector<T> >
1458
{
1459
public:
1460
typedef T* iterator;
1461
typedef const T* const_iterator;
1462
typedef T value_type;
1463
typedef T& reference;
1464
typedef const T& const_reference;
1465
typedef T* pointer;
1466
typedef const T* const_pointer;
1467
1468
inline vector() :
1469
m_p(nullptr),
1470
m_size(0),
1471
m_capacity(0)
1472
{
1473
}
1474
1475
inline vector(size_t n, const T& init) :
1476
m_p(nullptr),
1477
m_size(0),
1478
m_capacity(0)
1479
{
1480
increase_capacity(n, false);
1481
construct_array(m_p, n, init);
1482
m_size = n;
1483
}
1484
1485
inline vector(vector&& other) :
1486
m_p(other.m_p),
1487
m_size(other.m_size),
1488
m_capacity(other.m_capacity)
1489
{
1490
other.m_p = nullptr;
1491
other.m_size = 0;
1492
other.m_capacity = 0;
1493
}
1494
1495
inline vector(const vector& other) :
1496
m_p(nullptr),
1497
m_size(0),
1498
m_capacity(0)
1499
{
1500
increase_capacity(other.m_size, false);
1501
1502
m_size = other.m_size;
1503
1504
if (BASISU_IS_BITWISE_COPYABLE(T))
1505
{
1506
if ((m_p) && (other.m_p))
1507
{
1508
memcpy((void *)m_p, other.m_p, m_size * sizeof(T));
1509
}
1510
}
1511
else
1512
{
1513
T* pDst = m_p;
1514
const T* pSrc = other.m_p;
1515
for (size_t i = m_size; i > 0; i--)
1516
construct(pDst++, *pSrc++);
1517
}
1518
}
1519
1520
inline explicit vector(size_t size) :
1521
m_p(nullptr),
1522
m_size(0),
1523
m_capacity(0)
1524
{
1525
resize(size);
1526
}
1527
1528
inline explicit vector(std::initializer_list<T> init_list) :
1529
m_p(nullptr),
1530
m_size(0),
1531
m_capacity(0)
1532
{
1533
resize(init_list.size());
1534
1535
size_t idx = 0;
1536
for (const T& elem : init_list)
1537
m_p[idx++] = elem;
1538
1539
assert(idx == m_size);
1540
}
1541
1542
inline vector(const readable_span<T>& rs) :
1543
m_p(nullptr),
1544
m_size(0),
1545
m_capacity(0)
1546
{
1547
set(rs);
1548
}
1549
1550
inline vector(const writable_span<T>& ws) :
1551
m_p(nullptr),
1552
m_size(0),
1553
m_capacity(0)
1554
{
1555
set(ws);
1556
}
1557
1558
// Set contents of vector to contents of the readable span
1559
bool set(const readable_span<T>& rs)
1560
{
1561
if (!rs.is_valid())
1562
{
1563
assert(0);
1564
return false;
1565
}
1566
1567
const size_t new_size = rs.size();
1568
1569
// Could call resize(), but it'll redundantly construct trivial types.
1570
if (m_size != new_size)
1571
{
1572
if (new_size < m_size)
1573
{
1574
if (BASISU_HAS_DESTRUCTOR(T))
1575
{
1576
scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
1577
}
1578
}
1579
else
1580
{
1581
if (new_size > m_capacity)
1582
{
1583
if (!increase_capacity(new_size, false, true))
1584
return false;
1585
}
1586
}
1587
1588
// Don't bother constructing trivial types, because we're going to memcpy() over them anyway.
1589
if (!BASISU_IS_BITWISE_COPYABLE(T))
1590
{
1591
scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
1592
}
1593
1594
m_size = new_size;
1595
}
1596
1597
if (!rs.copy_from(0, rs.size(), m_p, 0))
1598
{
1599
assert(0);
1600
return false;
1601
}
1602
1603
return true;
1604
}
1605
1606
// Set contents of vector to contents of the writable span
1607
inline bool set(const writable_span<T>& ws)
1608
{
1609
return set(ws.get_readable_span());
1610
}
1611
1612
inline ~vector()
1613
{
1614
if (m_p)
1615
{
1616
if (BASISU_HAS_DESTRUCTOR(T))
1617
{
1618
scalar_type<T>::destruct_array(m_p, m_size);
1619
}
1620
1621
free(m_p);
1622
}
1623
}
1624
1625
inline vector& operator= (const vector& other)
1626
{
1627
if (this == &other)
1628
return *this;
1629
1630
if (m_capacity >= other.m_size)
1631
resize(0);
1632
else
1633
{
1634
clear();
1635
increase_capacity(other.m_size, false);
1636
}
1637
1638
if (BASISU_IS_BITWISE_COPYABLE(T))
1639
{
1640
if ((m_p) && (other.m_p))
1641
memcpy((void *)m_p, other.m_p, other.m_size * sizeof(T));
1642
}
1643
else
1644
{
1645
T* pDst = m_p;
1646
const T* pSrc = other.m_p;
1647
for (size_t i = other.m_size; i > 0; i--)
1648
construct(pDst++, *pSrc++);
1649
}
1650
1651
m_size = other.m_size;
1652
1653
return *this;
1654
}
1655
1656
inline vector& operator= (vector&& rhs)
1657
{
1658
if (this != &rhs)
1659
{
1660
clear();
1661
1662
m_p = rhs.m_p;
1663
m_size = rhs.m_size;
1664
m_capacity = rhs.m_capacity;
1665
1666
rhs.m_p = nullptr;
1667
rhs.m_size = 0;
1668
rhs.m_capacity = 0;
1669
}
1670
return *this;
1671
}
1672
1673
BASISU_FORCE_INLINE const T* begin() const { return m_p; }
1674
BASISU_FORCE_INLINE T* begin() { return m_p; }
1675
1676
BASISU_FORCE_INLINE const T* end() const { return m_p + m_size; }
1677
BASISU_FORCE_INLINE T* end() { return m_p + m_size; }
1678
1679
BASISU_FORCE_INLINE bool empty() const { return !m_size; }
1680
1681
BASISU_FORCE_INLINE size_t size() const { return m_size; }
1682
BASISU_FORCE_INLINE uint32_t size_u32() const { assert(m_size <= UINT32_MAX); return static_cast<uint32_t>(m_size); }
1683
1684
BASISU_FORCE_INLINE size_t size_in_bytes() const { return m_size * sizeof(T); }
1685
BASISU_FORCE_INLINE uint32_t size_in_bytes_u32() const { assert((m_size * sizeof(T)) <= UINT32_MAX); return static_cast<uint32_t>(m_size * sizeof(T)); }
1686
1687
BASISU_FORCE_INLINE size_t capacity() const { return m_capacity; }
1688
1689
#if !BASISU_VECTOR_FORCE_CHECKING
1690
BASISU_FORCE_INLINE const T& operator[] (size_t i) const { assert(i < m_size); return m_p[i]; }
1691
BASISU_FORCE_INLINE T& operator[] (size_t i) { assert(i < m_size); return m_p[i]; }
1692
#else
1693
BASISU_FORCE_INLINE const T& operator[] (size_t i) const
1694
{
1695
if (i >= m_size)
1696
container_abort("vector::operator[] invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1697
1698
return m_p[i];
1699
}
1700
BASISU_FORCE_INLINE T& operator[] (size_t i)
1701
{
1702
if (i >= m_size)
1703
container_abort("vector::operator[] invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1704
1705
return m_p[i];
1706
}
1707
#endif
1708
1709
// at() always includes range checking, even in final builds, unlike operator [].
1710
BASISU_FORCE_INLINE const T& at(size_t i) const
1711
{
1712
if (i >= m_size)
1713
container_abort("vector::at() invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1714
1715
return m_p[i];
1716
}
1717
BASISU_FORCE_INLINE T& at(size_t i)
1718
{
1719
if (i >= m_size)
1720
container_abort("vector::at() invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1721
1722
return m_p[i];
1723
}
1724
1725
#if !BASISU_VECTOR_FORCE_CHECKING
1726
BASISU_FORCE_INLINE const T& front() const { assert(m_size); return m_p[0]; }
1727
BASISU_FORCE_INLINE T& front() { assert(m_size); return m_p[0]; }
1728
1729
BASISU_FORCE_INLINE const T& back() const { assert(m_size); return m_p[m_size - 1]; }
1730
BASISU_FORCE_INLINE T& back() { assert(m_size); return m_p[m_size - 1]; }
1731
#else
1732
BASISU_FORCE_INLINE const T& front() const
1733
{
1734
if (!m_size)
1735
container_abort("front: vector is empty, type size %zu\n", sizeof(T));
1736
1737
return m_p[0];
1738
}
1739
BASISU_FORCE_INLINE T& front()
1740
{
1741
if (!m_size)
1742
container_abort("front: vector is empty, type size %zu\n", sizeof(T));
1743
1744
return m_p[0];
1745
}
1746
1747
BASISU_FORCE_INLINE const T& back() const
1748
{
1749
if (!m_size)
1750
container_abort("back: vector is empty, type size %zu\n", sizeof(T));
1751
1752
return m_p[m_size - 1];
1753
}
1754
BASISU_FORCE_INLINE T& back()
1755
{
1756
if (!m_size)
1757
container_abort("back: vector is empty, type size %zu\n", sizeof(T));
1758
1759
return m_p[m_size - 1];
1760
}
1761
#endif
1762
1763
BASISU_FORCE_INLINE const T* get_ptr() const { return m_p; }
1764
BASISU_FORCE_INLINE T* get_ptr() { return m_p; }
1765
1766
BASISU_FORCE_INLINE const T* data() const { return m_p; }
1767
BASISU_FORCE_INLINE T* data() { return m_p; }
1768
1769
// clear() sets the container to empty, then frees the allocated block.
1770
inline void clear()
1771
{
1772
if (m_p)
1773
{
1774
if (BASISU_HAS_DESTRUCTOR(T))
1775
{
1776
scalar_type<T>::destruct_array(m_p, m_size);
1777
}
1778
1779
free(m_p);
1780
1781
m_p = nullptr;
1782
m_size = 0;
1783
m_capacity = 0;
1784
}
1785
}
1786
1787
inline void clear_no_destruction()
1788
{
1789
if (m_p)
1790
{
1791
free(m_p);
1792
m_p = nullptr;
1793
m_size = 0;
1794
m_capacity = 0;
1795
}
1796
}
1797
1798
inline void reserve(size_t new_capacity)
1799
{
1800
if (!try_reserve(new_capacity))
1801
container_abort("vector:reserve: try_reserve failed!\n");
1802
}
1803
1804
inline bool try_reserve(size_t new_capacity)
1805
{
1806
if (new_capacity > m_capacity)
1807
{
1808
if (!increase_capacity(new_capacity, false, true))
1809
return false;
1810
}
1811
else if (new_capacity < m_capacity)
1812
{
1813
// Must work around the lack of a "decrease_capacity()" method.
1814
// This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
1815
vector tmp;
1816
if (!tmp.increase_capacity(helpers::maximum(m_size, new_capacity), false, true))
1817
return false;
1818
1819
tmp = *this;
1820
swap(tmp);
1821
}
1822
1823
return true;
1824
}
1825
1826
// try_resize(0) sets the container to empty, but does not free the allocated block.
1827
inline bool try_resize(size_t new_size, bool grow_hint = false)
1828
{
1829
if (m_size != new_size)
1830
{
1831
if (new_size < m_size)
1832
{
1833
if (BASISU_HAS_DESTRUCTOR(T))
1834
{
1835
scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
1836
}
1837
}
1838
else
1839
{
1840
if (new_size > m_capacity)
1841
{
1842
if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
1843
return false;
1844
}
1845
1846
scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
1847
}
1848
1849
m_size = new_size;
1850
}
1851
1852
return true;
1853
}
1854
1855
// resize(0) sets the container to empty, but does not free the allocated block.
1856
inline void resize(size_t new_size, bool grow_hint = false)
1857
{
1858
if (!try_resize(new_size, grow_hint))
1859
container_abort("vector::resize failed, new size %zu\n", new_size);
1860
}
1861
1862
// If size >= capacity/2, reset() sets the container's size to 0 but doesn't free the allocated block (because the container may be similarly loaded in the future).
1863
// Otherwise it blows away the allocated block. See http://www.codercorner.com/blog/?p=494
1864
inline void reset()
1865
{
1866
if (m_size >= (m_capacity >> 1))
1867
resize(0);
1868
else
1869
clear();
1870
}
1871
1872
inline T* try_enlarge(size_t i)
1873
{
1874
size_t cur_size = m_size;
1875
1876
if (add_overflow_check(cur_size, i))
1877
return nullptr;
1878
1879
if (!try_resize(cur_size + i, true))
1880
return nullptr;
1881
1882
return get_ptr() + cur_size;
1883
}
1884
1885
inline T* enlarge(size_t i)
1886
{
1887
T* p = try_enlarge(i);
1888
if (!p)
1889
container_abort("vector::enlarge failed, amount %zu!\n", i);
1890
return p;
1891
}
1892
1893
BASISU_FORCE_INLINE void push_back(const T& obj)
1894
{
1895
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1896
1897
if (m_size >= m_capacity)
1898
{
1899
if (add_overflow_check(m_size, 1))
1900
container_abort("vector::push_back: vector too large\n");
1901
1902
increase_capacity(m_size + 1, true);
1903
}
1904
1905
scalar_type<T>::construct(m_p + m_size, obj);
1906
m_size++;
1907
}
1908
1909
BASISU_FORCE_INLINE void push_back_value(T&& obj)
1910
{
1911
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1912
1913
if (m_size >= m_capacity)
1914
{
1915
if (add_overflow_check(m_size, 1))
1916
container_abort("vector::push_back_value: vector too large\n");
1917
1918
increase_capacity(m_size + 1, true);
1919
}
1920
1921
new ((void*)(m_p + m_size)) T(std::move(obj));
1922
m_size++;
1923
}
1924
1925
inline bool try_push_back(const T& obj)
1926
{
1927
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1928
1929
if (m_size >= m_capacity)
1930
{
1931
if (add_overflow_check(m_size, 1))
1932
return false;
1933
1934
if (!increase_capacity(m_size + 1, true, true))
1935
return false;
1936
}
1937
1938
scalar_type<T>::construct(m_p + m_size, obj);
1939
m_size++;
1940
1941
return true;
1942
}
1943
1944
inline bool try_push_back(T&& obj)
1945
{
1946
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1947
1948
if (m_size >= m_capacity)
1949
{
1950
if (add_overflow_check(m_size, 1))
1951
return false;
1952
1953
if (!increase_capacity(m_size + 1, true, true))
1954
return false;
1955
}
1956
1957
new ((void*)(m_p + m_size)) T(std::move(obj));
1958
m_size++;
1959
1960
return true;
1961
}
1962
1963
// obj is explictly passed in by value, not ref
1964
inline void push_back_value(T obj)
1965
{
1966
if (m_size >= m_capacity)
1967
{
1968
if (add_overflow_check(m_size, 1))
1969
container_abort("vector::push_back_value: vector too large\n");
1970
1971
increase_capacity(m_size + 1, true);
1972
}
1973
1974
scalar_type<T>::construct(m_p + m_size, obj);
1975
m_size++;
1976
}
1977
1978
// obj is explictly passed in by value, not ref
1979
inline bool try_push_back_value(T obj)
1980
{
1981
if (m_size >= m_capacity)
1982
{
1983
if (add_overflow_check(m_size, 1))
1984
return false;
1985
1986
if (!increase_capacity(m_size + 1, true, true))
1987
return false;
1988
}
1989
1990
scalar_type<T>::construct(m_p + m_size, obj);
1991
m_size++;
1992
1993
return true;
1994
}
1995
1996
template<typename... Args>
1997
BASISU_FORCE_INLINE void emplace_back(Args&&... args)
1998
{
1999
if (m_size >= m_capacity)
2000
{
2001
if (add_overflow_check(m_size, 1))
2002
container_abort("vector::enlarge: vector too large\n");
2003
2004
increase_capacity(m_size + 1, true);
2005
}
2006
2007
new ((void*)(m_p + m_size)) T(std::forward<Args>(args)...); // perfect forwarding
2008
m_size++;
2009
}
2010
2011
template<typename... Args>
2012
BASISU_FORCE_INLINE bool try_emplace_back(Args&&... args)
2013
{
2014
if (m_size >= m_capacity)
2015
{
2016
if (add_overflow_check(m_size, 1))
2017
return false;
2018
2019
if (!increase_capacity(m_size + 1, true, true))
2020
return false;
2021
}
2022
2023
new ((void*)(m_p + m_size)) T(std::forward<Args>(args)...); // perfect forwarding
2024
m_size++;
2025
2026
return true;
2027
}
2028
2029
inline void pop_back()
2030
{
2031
assert(m_size);
2032
2033
if (m_size)
2034
{
2035
m_size--;
2036
scalar_type<T>::destruct(&m_p[m_size]);
2037
}
2038
}
2039
2040
inline bool try_insert(size_t index, const T* p, size_t n)
2041
{
2042
assert(index <= m_size);
2043
2044
if (index > m_size)
2045
return false;
2046
2047
if (!n)
2048
return true;
2049
2050
const size_t orig_size = m_size;
2051
2052
if (add_overflow_check(m_size, n))
2053
return false;
2054
2055
if (!try_resize(m_size + n, true))
2056
return false;
2057
2058
const size_t num_to_move = orig_size - index;
2059
2060
if (BASISU_IS_BITWISE_COPYABLE(T))
2061
{
2062
// This overwrites the destination object bits, but bitwise copyable means we don't need to worry about destruction.
2063
memmove(m_p + index + n, m_p + index, sizeof(T) * num_to_move);
2064
}
2065
else
2066
{
2067
const T* pSrc = m_p + orig_size - 1;
2068
T* pDst = const_cast<T*>(pSrc) + n;
2069
2070
for (size_t i = 0; i < num_to_move; i++)
2071
{
2072
assert((uint64_t)(pDst - m_p) < (uint64_t)m_size);
2073
2074
*pDst = std::move(*pSrc);
2075
pDst--;
2076
pSrc--;
2077
}
2078
}
2079
2080
T* pDst = m_p + index;
2081
2082
if (BASISU_IS_BITWISE_COPYABLE(T))
2083
{
2084
// This copies in the new bits, overwriting the existing objects, which is OK for copyable types that don't need destruction.
2085
memcpy(pDst, p, sizeof(T) * n);
2086
}
2087
else
2088
{
2089
for (size_t i = 0; i < n; i++)
2090
{
2091
assert((uint64_t)(pDst - m_p) < (uint64_t)m_size);
2092
*pDst++ = *p++;
2093
}
2094
}
2095
2096
return true;
2097
}
2098
2099
inline void insert(size_t index, const T* p, size_t n)
2100
{
2101
if (!try_insert(index, p, n))
2102
container_abort("vector::insert() failed!\n");
2103
}
2104
2105
inline bool try_insert(T* p, const T& obj)
2106
{
2107
if (p < begin())
2108
{
2109
assert(0);
2110
return false;
2111
}
2112
2113
uint64_t ofs = p - begin();
2114
2115
if (ofs > m_size)
2116
{
2117
assert(0);
2118
return false;
2119
}
2120
2121
if ((size_t)ofs != ofs)
2122
{
2123
assert(0);
2124
return false;
2125
}
2126
2127
return try_insert((size_t)ofs, &obj, 1);
2128
}
2129
2130
inline void insert(T* p, const T& obj)
2131
{
2132
if (!try_insert(p, obj))
2133
container_abort("vector::insert() failed!\n");
2134
}
2135
2136
// push_front() isn't going to be very fast - it's only here for usability.
2137
inline void push_front(const T& obj)
2138
{
2139
insert(0, &obj, 1);
2140
}
2141
2142
inline bool try_push_front(const T& obj)
2143
{
2144
return try_insert(0, &obj, 1);
2145
}
2146
2147
vector& append(const vector& other)
2148
{
2149
if (other.m_size)
2150
insert(m_size, &other[0], other.m_size);
2151
return *this;
2152
}
2153
2154
bool try_append(const vector& other)
2155
{
2156
if (other.m_size)
2157
return try_insert(m_size, &other[0], other.m_size);
2158
2159
return true;
2160
}
2161
2162
vector& append(const T* p, size_t n)
2163
{
2164
if (n)
2165
insert(m_size, p, n);
2166
return *this;
2167
}
2168
2169
bool try_append(const T* p, size_t n)
2170
{
2171
if (n)
2172
return try_insert(m_size, p, n);
2173
2174
return true;
2175
}
2176
2177
inline bool erase(size_t start, size_t n)
2178
{
2179
if (add_overflow_check(start, n))
2180
{
2181
assert(0);
2182
return false;
2183
}
2184
2185
assert((start + n) <= m_size);
2186
2187
if ((start + n) > m_size)
2188
{
2189
assert(0);
2190
return false;
2191
}
2192
2193
if (!n)
2194
return true;
2195
2196
const size_t num_to_move = m_size - (start + n);
2197
2198
T* pDst = m_p + start;
2199
2200
const T* pSrc = m_p + start + n;
2201
2202
if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T))
2203
{
2204
// This test is overly cautious.
2205
if ((!BASISU_IS_BITWISE_COPYABLE(T)) || (BASISU_HAS_DESTRUCTOR(T)))
2206
{
2207
// Type has been marked explictly as bitwise movable, which means we can move them around but they may need to be destructed.
2208
// First destroy the erased objects.
2209
scalar_type<T>::destruct_array(pDst, n);
2210
}
2211
2212
// Copy "down" the objects to preserve, filling in the empty slots.
2213
memmove((void *)pDst, pSrc, num_to_move * sizeof(T));
2214
}
2215
else
2216
{
2217
// Type is not bitwise copyable or movable.
2218
// Move them down one at a time by using the equals operator, and destroying anything that's left over at the end.
2219
T* pDst_end = pDst + num_to_move;
2220
2221
while (pDst != pDst_end)
2222
{
2223
*pDst = std::move(*pSrc);
2224
2225
++pDst;
2226
++pSrc;
2227
}
2228
2229
scalar_type<T>::destruct_array(pDst_end, n);
2230
}
2231
2232
m_size -= n;
2233
2234
return true;
2235
}
2236
2237
inline bool erase_index(size_t index)
2238
{
2239
return erase(index, 1);
2240
}
2241
2242
inline bool erase(T* p)
2243
{
2244
assert((p >= m_p) && (p < (m_p + m_size)));
2245
2246
if (p < m_p)
2247
return false;
2248
2249
return erase_index(static_cast<size_t>(p - m_p));
2250
}
2251
2252
inline bool erase(T* pFirst, T* pEnd)
2253
{
2254
assert(pFirst <= pEnd);
2255
assert(pFirst >= begin() && pFirst <= end());
2256
assert(pEnd >= begin() && pEnd <= end());
2257
2258
if ((pFirst < begin()) || (pEnd < pFirst))
2259
{
2260
assert(0);
2261
return false;
2262
}
2263
2264
uint64_t ofs = pFirst - begin();
2265
if ((size_t)ofs != ofs)
2266
{
2267
assert(0);
2268
return false;
2269
}
2270
2271
uint64_t n = pEnd - pFirst;
2272
if ((size_t)n != n)
2273
{
2274
assert(0);
2275
return false;
2276
}
2277
2278
return erase((size_t)ofs, (size_t)n);
2279
}
2280
2281
bool erase_unordered(size_t index)
2282
{
2283
if (index >= m_size)
2284
{
2285
assert(0);
2286
return false;
2287
}
2288
2289
if ((index + 1) < m_size)
2290
{
2291
(*this)[index] = std::move(back());
2292
}
2293
2294
pop_back();
2295
return true;
2296
}
2297
2298
inline bool operator== (const vector& rhs) const
2299
{
2300
if (m_size != rhs.m_size)
2301
return false;
2302
else if (m_size)
2303
{
2304
if (scalar_type<T>::cFlag)
2305
return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
2306
else
2307
{
2308
const T* pSrc = m_p;
2309
const T* pDst = rhs.m_p;
2310
for (size_t i = m_size; i; i--)
2311
if (!(*pSrc++ == *pDst++))
2312
return false;
2313
}
2314
}
2315
2316
return true;
2317
}
2318
2319
inline bool operator< (const vector& rhs) const
2320
{
2321
const size_t min_size = helpers::minimum(m_size, rhs.m_size);
2322
2323
const T* pSrc = m_p;
2324
const T* pSrc_end = m_p + min_size;
2325
const T* pDst = rhs.m_p;
2326
2327
while ((pSrc < pSrc_end) && (*pSrc == *pDst))
2328
{
2329
pSrc++;
2330
pDst++;
2331
}
2332
2333
if (pSrc < pSrc_end)
2334
return *pSrc < *pDst;
2335
2336
return m_size < rhs.m_size;
2337
}
2338
2339
inline void swap(vector& other)
2340
{
2341
std::swap(m_p, other.m_p);
2342
std::swap(m_size, other.m_size);
2343
std::swap(m_capacity, other.m_capacity);
2344
}
2345
2346
inline void sort()
2347
{
2348
std::sort(begin(), end());
2349
}
2350
2351
inline void unique()
2352
{
2353
if (!empty())
2354
{
2355
sort();
2356
2357
resize(std::unique(begin(), end()) - begin());
2358
}
2359
}
2360
2361
inline void reverse()
2362
{
2363
const size_t j = m_size >> 1;
2364
2365
for (size_t i = 0; i < j; i++)
2366
std::swap(m_p[i], m_p[m_size - 1 - i]);
2367
}
2368
2369
inline bool find(const T& key, size_t &idx) const
2370
{
2371
idx = 0;
2372
2373
const T* p = m_p;
2374
const T* p_end = m_p + m_size;
2375
2376
size_t index = 0;
2377
2378
while (p != p_end)
2379
{
2380
if (key == *p)
2381
{
2382
idx = index;
2383
return true;
2384
}
2385
2386
p++;
2387
index++;
2388
}
2389
2390
return false;
2391
}
2392
2393
inline bool find_sorted(const T& key, size_t& idx) const
2394
{
2395
idx = 0;
2396
2397
if (!m_size)
2398
return false;
2399
2400
// Inclusive range
2401
size_t low = 0, high = m_size - 1;
2402
2403
while (low <= high)
2404
{
2405
size_t mid = (size_t)(((uint64_t)low + (uint64_t)high) >> 1);
2406
2407
const T* pTrial_key = m_p + mid;
2408
2409
// Sanity check comparison operator
2410
assert(!((*pTrial_key < key) && (key < *pTrial_key)));
2411
2412
if (*pTrial_key < key)
2413
{
2414
if (add_overflow_check(mid, 1))
2415
break;
2416
2417
low = mid + 1;
2418
}
2419
else if (key < *pTrial_key)
2420
{
2421
if (!mid)
2422
break;
2423
2424
high = mid - 1;
2425
}
2426
else
2427
{
2428
idx = mid;
2429
return true;
2430
}
2431
}
2432
2433
return false;
2434
}
2435
2436
inline size_t count_occurences(const T& key) const
2437
{
2438
size_t c = 0;
2439
2440
const T* p = m_p;
2441
const T* p_end = m_p + m_size;
2442
2443
while (p != p_end)
2444
{
2445
if (key == *p)
2446
c++;
2447
2448
p++;
2449
}
2450
2451
return c;
2452
}
2453
2454
inline void set_all(const T& o)
2455
{
2456
if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
2457
{
2458
#if defined(__GNUC__) && !defined(__clang__)
2459
#pragma GCC diagnostic push
2460
#pragma GCC diagnostic ignored "-Wclass-memaccess"
2461
#endif
2462
memset(m_p, *reinterpret_cast<const uint8_t*>(&o), m_size);
2463
#if defined(__GNUC__) && !defined(__clang__)
2464
#pragma GCC diagnostic pop
2465
#endif
2466
}
2467
else
2468
{
2469
T* pDst = m_p;
2470
T* pDst_end = pDst + m_size;
2471
while (pDst != pDst_end)
2472
*pDst++ = o;
2473
}
2474
}
2475
2476
// Caller assumes ownership of the heap block associated with the container. Container is cleared.
2477
// Caller must use free() on the returned pointer.
2478
inline void* assume_ownership()
2479
{
2480
T* p = m_p;
2481
m_p = nullptr;
2482
m_size = 0;
2483
m_capacity = 0;
2484
return p;
2485
}
2486
2487
// Caller is granting ownership of the indicated heap block.
2488
// Block must have size constructed elements, and have enough room for capacity elements.
2489
// The block must have been allocated using malloc().
2490
// Important: This method is used in Basis Universal. If you change how this container allocates memory, you'll need to change any users of this method.
2491
inline bool grant_ownership(T* p, size_t size, size_t capacity)
2492
{
2493
// To prevent the caller from obviously shooting themselves in the foot.
2494
if (((p + capacity) > m_p) && (p < (m_p + m_capacity)))
2495
{
2496
// Can grant ownership of a block inside the container itself!
2497
assert(0);
2498
return false;
2499
}
2500
2501
if (size > capacity)
2502
{
2503
assert(0);
2504
return false;
2505
}
2506
2507
if (!p)
2508
{
2509
if (capacity)
2510
{
2511
assert(0);
2512
return false;
2513
}
2514
}
2515
else if (!capacity)
2516
{
2517
assert(0);
2518
return false;
2519
}
2520
2521
clear();
2522
m_p = p;
2523
m_size = size;
2524
m_capacity = capacity;
2525
return true;
2526
}
2527
2528
readable_span<T> get_readable_span() const
2529
{
2530
return readable_span<T>(m_p, m_size);
2531
}
2532
2533
writable_span<T> get_writable_span()
2534
{
2535
return writable_span<T>(m_p, m_size);
2536
}
2537
2538
private:
2539
T* m_p;
2540
size_t m_size; // the number of constructed objects
2541
size_t m_capacity; // the size of the allocation
2542
2543
template<typename Q> struct is_vector { enum { cFlag = false }; };
2544
template<typename Q> struct is_vector< vector<Q> > { enum { cFlag = true }; };
2545
2546
static void object_mover(void* pDst_void, void* pSrc_void, size_t num)
2547
{
2548
T* pSrc = static_cast<T*>(pSrc_void);
2549
T* const pSrc_end = pSrc + num;
2550
T* pDst = static_cast<T*>(pDst_void);
2551
2552
while (pSrc != pSrc_end)
2553
{
2554
new ((void*)(pDst)) T(std::move(*pSrc));
2555
scalar_type<T>::destruct(pSrc);
2556
2557
++pSrc;
2558
++pDst;
2559
}
2560
}
2561
2562
inline bool increase_capacity(size_t min_new_capacity, bool grow_hint, bool nofail = false)
2563
{
2564
return reinterpret_cast<elemental_vector*>(this)->increase_capacity(
2565
min_new_capacity, grow_hint, sizeof(T),
2566
(BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) || (is_vector<T>::cFlag)) ? nullptr : object_mover, nofail);
2567
}
2568
};
2569
2570
// Marks vector<T> as bitwise movable for this file's trait machinery: its state is just
// { pointer, size, capacity }, so whole vector objects can be relocated with memcpy/memmove.
template<typename T> struct bitwise_movable< vector<T> > { enum { cFlag = true }; };
2571
2572
// Hash map
2573
// rg TODO 9/8/2024: I've upgraded this class to support 64-bit size_t, and it needs a lot more testing.
2574
2575
// Number of bits in size_t on the target (32 or 64); used by hash_map's hash shifting.
const uint32_t SIZE_T_BITS = sizeof(size_t) * 8U;
2576
2577
// Left-shifts v by l bits, returning 0 (instead of invoking undefined behavior) when
// the shift count is >= the 32-bit operand width.
inline uint32_t safe_shift_left(uint32_t v, uint32_t l)
{
	if (l >= 32U)
		return 0;

	return v << l;
}
2581
2582
// Left-shifts v by l bits, returning 0 (instead of invoking undefined behavior) when
// the shift count is >= the 64-bit operand width.
inline uint64_t safe_shift_left(uint64_t v, uint32_t l)
{
	if (l >= 64U)
		return 0;

	return v << l;
}
2586
2587
// Default hash functor for hash_map: converts the key straight to size_t.
// Only meaningful for keys convertible to an integer; supply a custom Hasher
// (or a specialization) for other key types. Must be bitwise movable.
template <typename T>
struct hasher
{
	inline size_t operator() (const T& key) const { return static_cast<size_t>(key); }
};
2592
2593
// Default key-equality functor for hash_map; forwards to the key type's operator==.
// Must be bitwise movable.
template <typename T>
struct equal_to
{
	inline bool operator()(const T& a, const T& b) const { return a == b; }
};
2598
2599
// Important: The Hasher and Equals objects must be bitwise movable!
2600
template<typename Key, typename Value = empty_type, typename Hasher = hasher<Key>, typename Equals = equal_to<Key> >
2601
class hash_map
2602
{
2603
public:
2604
class iterator;
2605
class const_iterator;
2606
2607
private:
2608
friend class iterator;
2609
friend class const_iterator;
2610
2611
enum state
2612
{
2613
cStateInvalid = 0,
2614
cStateValid = 1
2615
};
2616
2617
enum
2618
{
2619
cMinHashSize = 4U
2620
};
2621
2622
public:
2623
typedef hash_map<Key, Value, Hasher, Equals> hash_map_type;
2624
typedef std::pair<Key, Value> value_type;
2625
typedef Key key_type;
2626
typedef Value referent_type;
2627
typedef Hasher hasher_type;
2628
typedef Equals equals_type;
2629
2630
hash_map() :
2631
m_num_valid(0),
2632
m_grow_threshold(0),
2633
m_hash_shift(SIZE_T_BITS)
2634
{
2635
static_assert((SIZE_T_BITS == 32) || (SIZE_T_BITS == 64), "SIZE_T_BITS must be 32 or 64");
2636
}
2637
2638
hash_map(const hash_map& other) :
2639
m_values(other.m_values),
2640
m_num_valid(other.m_num_valid),
2641
m_grow_threshold(other.m_grow_threshold),
2642
m_hash_shift(other.m_hash_shift),
2643
m_hasher(other.m_hasher),
2644
m_equals(other.m_equals)
2645
{
2646
static_assert((SIZE_T_BITS == 32) || (SIZE_T_BITS == 64), "SIZE_T_BITS must be 32 or 64");
2647
}
2648
2649
hash_map(hash_map&& other) :
2650
m_values(std::move(other.m_values)),
2651
m_num_valid(other.m_num_valid),
2652
m_grow_threshold(other.m_grow_threshold),
2653
m_hash_shift(other.m_hash_shift),
2654
m_hasher(std::move(other.m_hasher)),
2655
m_equals(std::move(other.m_equals))
2656
{
2657
static_assert((SIZE_T_BITS == 32) || (SIZE_T_BITS == 64), "SIZE_T_BITS must be 32 or 64");
2658
2659
other.m_hash_shift = SIZE_T_BITS;
2660
other.m_num_valid = 0;
2661
other.m_grow_threshold = 0;
2662
}
2663
2664
hash_map& operator= (const hash_map& other)
2665
{
2666
if (this == &other)
2667
return *this;
2668
2669
clear();
2670
2671
m_values = other.m_values;
2672
m_hash_shift = other.m_hash_shift;
2673
m_num_valid = other.m_num_valid;
2674
m_grow_threshold = other.m_grow_threshold;
2675
m_hasher = other.m_hasher;
2676
m_equals = other.m_equals;
2677
2678
return *this;
2679
}
2680
2681
hash_map& operator= (hash_map&& other)
2682
{
2683
if (this == &other)
2684
return *this;
2685
2686
clear();
2687
2688
m_values = std::move(other.m_values);
2689
m_hash_shift = other.m_hash_shift;
2690
m_num_valid = other.m_num_valid;
2691
m_grow_threshold = other.m_grow_threshold;
2692
m_hasher = std::move(other.m_hasher);
2693
m_equals = std::move(other.m_equals);
2694
2695
other.m_hash_shift = SIZE_T_BITS;
2696
other.m_num_valid = 0;
2697
other.m_grow_threshold = 0;
2698
2699
return *this;
2700
}
2701
2702
inline ~hash_map()
2703
{
2704
clear();
2705
}
2706
2707
inline const Equals& get_equals() const { return m_equals; }
2708
inline Equals& get_equals() { return m_equals; }
2709
inline void set_equals(const Equals& equals) { m_equals = equals; }
2710
2711
inline const Hasher& get_hasher() const { return m_hasher; }
2712
inline Hasher& get_hasher() { return m_hasher; }
2713
inline void set_hasher(const Hasher& hasher) { m_hasher = hasher; }
2714
2715
inline void clear()
2716
{
2717
if (m_values.empty())
2718
return;
2719
2720
if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
2721
{
2722
node* p = &get_node(0);
2723
node* p_end = p + m_values.size();
2724
2725
size_t num_remaining = m_num_valid;
2726
while (p != p_end)
2727
{
2728
if (p->state)
2729
{
2730
destruct_value_type(p);
2731
num_remaining--;
2732
if (!num_remaining)
2733
break;
2734
}
2735
2736
p++;
2737
}
2738
}
2739
2740
m_values.clear_no_destruction();
2741
2742
m_hash_shift = SIZE_T_BITS;
2743
m_num_valid = 0;
2744
m_grow_threshold = 0;
2745
}
2746
2747
inline void reset()
2748
{
2749
if (!m_num_valid)
2750
return;
2751
2752
if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
2753
{
2754
node* p = &get_node(0);
2755
node* p_end = p + m_values.size();
2756
2757
size_t num_remaining = m_num_valid;
2758
while (p != p_end)
2759
{
2760
if (p->state)
2761
{
2762
destruct_value_type(p);
2763
p->state = cStateInvalid;
2764
2765
num_remaining--;
2766
if (!num_remaining)
2767
break;
2768
}
2769
2770
p++;
2771
}
2772
}
2773
else if (sizeof(node) <= 16)
2774
{
2775
memset(&m_values[0], 0, m_values.size_in_bytes());
2776
}
2777
else
2778
{
2779
node* p = &get_node(0);
2780
node* p_end = p + m_values.size();
2781
2782
size_t num_remaining = m_num_valid;
2783
while (p != p_end)
2784
{
2785
if (p->state)
2786
{
2787
p->state = cStateInvalid;
2788
2789
num_remaining--;
2790
if (!num_remaining)
2791
break;
2792
}
2793
2794
p++;
2795
}
2796
}
2797
2798
m_num_valid = 0;
2799
}
2800
2801
inline size_t size()
2802
{
2803
return m_num_valid;
2804
}
2805
2806
inline size_t get_table_size()
2807
{
2808
return m_values.size();
2809
}
2810
2811
inline bool empty()
2812
{
2813
return !m_num_valid;
2814
}
2815
2816
inline bool reserve(size_t new_capacity)
2817
{
2818
if (!new_capacity)
2819
return true;
2820
2821
uint64_t new_hash_size = new_capacity;
2822
2823
new_hash_size = new_hash_size * 2ULL;
2824
2825
if (!helpers::is_power_of_2(new_hash_size))
2826
new_hash_size = helpers::next_pow2(new_hash_size);
2827
2828
new_hash_size = helpers::maximum<uint64_t>(cMinHashSize, new_hash_size);
2829
2830
if (!can_fit_into_size_t(new_hash_size))
2831
{
2832
assert(0);
2833
return false;
2834
}
2835
2836
assert(new_hash_size >= new_capacity);
2837
2838
if (new_hash_size <= m_values.size())
2839
return true;
2840
2841
return rehash((size_t)new_hash_size);
2842
}
2843
2844
class iterator
2845
{
2846
friend class hash_map<Key, Value, Hasher, Equals>;
2847
friend class hash_map<Key, Value, Hasher, Equals>::const_iterator;
2848
2849
public:
2850
inline iterator() : m_pTable(nullptr), m_index(0) { }
2851
inline iterator(hash_map_type& table, size_t index) : m_pTable(&table), m_index(index) { }
2852
inline iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }
2853
2854
inline iterator& operator= (const iterator& other)
2855
{
2856
m_pTable = other.m_pTable;
2857
m_index = other.m_index;
2858
return *this;
2859
}
2860
2861
// post-increment
2862
inline iterator operator++(int)
2863
{
2864
iterator result(*this);
2865
++*this;
2866
return result;
2867
}
2868
2869
// pre-increment
2870
inline iterator& operator++()
2871
{
2872
probe();
2873
return *this;
2874
}
2875
2876
inline value_type& operator*() const { return *get_cur(); }
2877
inline value_type* operator->() const { return get_cur(); }
2878
2879
inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
2880
inline bool operator != (const iterator& b) const { return !(*this == b); }
2881
inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
2882
inline bool operator != (const const_iterator& b) const { return !(*this == b); }
2883
2884
private:
2885
hash_map_type* m_pTable;
2886
size_t m_index;
2887
2888
inline value_type* get_cur() const
2889
{
2890
assert(m_pTable && (m_index < m_pTable->m_values.size()));
2891
assert(m_pTable->get_node_state(m_index) == cStateValid);
2892
2893
return &m_pTable->get_node(m_index);
2894
}
2895
2896
inline void probe()
2897
{
2898
assert(m_pTable);
2899
m_index = m_pTable->find_next(m_index);
2900
}
2901
};
2902
2903
class const_iterator
2904
{
2905
friend class hash_map<Key, Value, Hasher, Equals>;
2906
friend class hash_map<Key, Value, Hasher, Equals>::iterator;
2907
2908
public:
2909
inline const_iterator() : m_pTable(nullptr), m_index(0) { }
2910
inline const_iterator(const hash_map_type& table, size_t index) : m_pTable(&table), m_index(index) { }
2911
inline const_iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }
2912
inline const_iterator(const const_iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }
2913
2914
inline const_iterator& operator= (const const_iterator& other)
2915
{
2916
m_pTable = other.m_pTable;
2917
m_index = other.m_index;
2918
return *this;
2919
}
2920
2921
inline const_iterator& operator= (const iterator& other)
2922
{
2923
m_pTable = other.m_pTable;
2924
m_index = other.m_index;
2925
return *this;
2926
}
2927
2928
// post-increment
2929
inline const_iterator operator++(int)
2930
{
2931
const_iterator result(*this);
2932
++*this;
2933
return result;
2934
}
2935
2936
// pre-increment
2937
inline const_iterator& operator++()
2938
{
2939
probe();
2940
return *this;
2941
}
2942
2943
inline const value_type& operator*() const { return *get_cur(); }
2944
inline const value_type* operator->() const { return get_cur(); }
2945
2946
inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
2947
inline bool operator != (const const_iterator& b) const { return !(*this == b); }
2948
inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
2949
inline bool operator != (const iterator& b) const { return !(*this == b); }
2950
2951
private:
2952
const hash_map_type* m_pTable;
2953
size_t m_index;
2954
2955
inline const value_type* get_cur() const
2956
{
2957
assert(m_pTable && (m_index < m_pTable->m_values.size()));
2958
assert(m_pTable->get_node_state(m_index) == cStateValid);
2959
2960
return &m_pTable->get_node(m_index);
2961
}
2962
2963
inline void probe()
2964
{
2965
assert(m_pTable);
2966
m_index = m_pTable->find_next(m_index);
2967
}
2968
};
2969
2970
// Returns a const_iterator at the first occupied slot, or end() when empty.
inline const_iterator begin() const
{
	if (!m_num_valid)
		return end();

	// find_next() pre-increments, so SIZE_MAX wraps around to start the scan at slot 0.
	return const_iterator(*this, find_next(std::numeric_limits<size_t>::max()));
}
2977
2978
// One-past-the-end sentinel: index equal to the bucket count.
inline const_iterator end() const
{
	return const_iterator(*this, m_values.size());
}
2982
2983
// Returns an iterator at the first occupied slot, or end() when empty.
inline iterator begin()
{
	if (!m_num_valid)
		return end();

	// find_next() pre-increments, so SIZE_MAX wraps around to start the scan at slot 0.
	return iterator(*this, find_next(std::numeric_limits<size_t>::max()));
}
2990
2991
// One-past-the-end sentinel: index equal to the bucket count.
inline iterator end()
{
	return iterator(*this, m_values.size());
}
2995
2996
// insert_result.first will always point to inserted key/value (or the already existing key/value).
2997
// insert_result.second will be true if a new key/value was inserted, or false if the key already existed (in which case first will point to the already existing value).
2998
typedef std::pair<iterator, bool> insert_result;
2999
3000
// Inserts (k, v) by copy, growing the table when needed.
// Aborts the process if growing or the post-grow insert fails.
inline insert_result insert(const Key& k, const Value& v = Value())
{
	insert_result result;

	if (!insert_no_grow(result, k, v))
	{
		if (!try_grow())
			container_abort("hash_map::try_grow() failed");

		// After a successful grow there must be room.
		if (!insert_no_grow(result, k, v))
			container_abort("hash_map::insert() failed");
	}

	return result;
}
3015
3016
// Non-aborting variant of insert(): returns false instead of aborting when the
// table cannot grow or the insert still fails afterwards.
inline bool try_insert(insert_result& result, const Key& k, const Value& v = Value())
{
	if (insert_no_grow(result, k, v))
		return true;

	if (!try_grow())
		return false;

	return insert_no_grow(result, k, v);
}
3029
3030
// Inserts (k, v) by move, growing the table when needed.
// Aborts the process if growing or the post-grow insert fails.
inline insert_result insert(Key&& k, Value&& v = Value())
{
	insert_result result;

	if (!insert_no_grow_move(result, std::move(k), std::move(v)))
	{
		if (!try_grow())
			container_abort("hash_map::try_grow() failed");

		// After a successful grow there must be room.
		if (!insert_no_grow_move(result, std::move(k), std::move(v)))
			container_abort("hash_map::insert() failed");
	}

	return result;
}
3045
3046
// Non-aborting move insert: returns false instead of aborting when the table
// cannot grow or the insert still fails afterwards.
inline bool try_insert(insert_result& result, Key&& k, Value&& v = Value())
{
	if (insert_no_grow_move(result, std::move(k), std::move(v)))
		return true;

	if (!try_grow())
		return false;

	return insert_no_grow_move(result, std::move(k), std::move(v));
}
3059
3060
// Convenience overload: copy-insert a key/value pair.
inline insert_result insert(const value_type& v)
{
	return insert(v.first, v.second);
}
3064
3065
// Convenience overload: non-aborting copy-insert of a key/value pair.
inline bool try_insert(insert_result& result, const value_type& v)
{
	return try_insert(result, v.first, v.second);
}
3069
3070
// Convenience overload: move-insert a key/value pair.
inline insert_result insert(value_type&& v)
{
	return insert(std::move(v.first), std::move(v.second));
}
3074
3075
// Convenience overload: non-aborting move-insert of a key/value pair.
inline bool try_insert(insert_result& result, value_type&& v)
{
	return try_insert(result, std::move(v.first), std::move(v.second));
}
3079
3080
// Looks up key k; returns end() if not present.
inline const_iterator find(const Key& k) const
{
	return const_iterator(*this, find_index(k));
}
3084
3085
// Looks up key k; returns end() if not present.
inline iterator find(const Key& k)
{
	return iterator(*this, find_index(k));
}
3089
3090
inline bool contains(const Key& k) const
3091
{
3092
const size_t idx = find_index(k);
3093
return idx != m_values.size();
3094
}
3095
3096
inline bool erase(const Key& k)
3097
{
3098
size_t i = find_index(k);
3099
3100
if (i >= m_values.size())
3101
return false;
3102
3103
node* pDst = &get_node(i);
3104
destruct_value_type(pDst);
3105
pDst->state = cStateInvalid;
3106
3107
m_num_valid--;
3108
3109
for (; ; )
3110
{
3111
size_t r, j = i;
3112
3113
node* pSrc = pDst;
3114
3115
do
3116
{
3117
if (!i)
3118
{
3119
i = m_values.size() - 1;
3120
pSrc = &get_node(i);
3121
}
3122
else
3123
{
3124
i--;
3125
pSrc--;
3126
}
3127
3128
if (!pSrc->state)
3129
return true;
3130
3131
r = hash_key(pSrc->first);
3132
3133
} while ((i <= r && r < j) || (r < j && j < i) || (j < i && i <= r));
3134
3135
move_node(pDst, pSrc);
3136
3137
pDst = pSrc;
3138
}
3139
}
3140
3141
// Exchanges the complete state of two hash maps (O(1), no element copies).
inline void swap(hash_map_type& other)
{
	m_values.swap(other.m_values);
	std::swap(m_hash_shift, other.m_hash_shift);
	std::swap(m_num_valid, other.m_num_valid);
	std::swap(m_grow_threshold, other.m_grow_threshold);
	std::swap(m_hasher, other.m_hasher);
	std::swap(m_equals, other.m_equals);
}
3150
3151
private:
3152
struct node : public value_type
3153
{
3154
uint8_t state;
3155
};
3156
3157
// Copy-constructs a key/value pair into raw storage at pDst, using memcpy for
// bitwise-copyable types and placement construction otherwise.
static inline void construct_value_type(value_type* pDst, const Key& k, const Value& v)
{
	if (BASISU_IS_BITWISE_COPYABLE(Key))
		memcpy(&pDst->first, &k, sizeof(Key));
	else
		scalar_type<Key>::construct(&pDst->first, k);

	if (BASISU_IS_BITWISE_COPYABLE(Value))
		memcpy(&pDst->second, &v, sizeof(Value));
	else
		scalar_type<Value>::construct(&pDst->second, v);
}
3169
3170
// Copy-constructs *pSrc into raw storage at pDst. When both halves are
// bitwise-copyable the whole pair is memcpy'd in one shot.
static inline void construct_value_type(value_type* pDst, const value_type* pSrc)
{
	if ((BASISU_IS_BITWISE_COPYABLE(Key)) && (BASISU_IS_BITWISE_COPYABLE(Value)))
	{
		memcpy(pDst, pSrc, sizeof(value_type));
	}
	else
	{
		if (BASISU_IS_BITWISE_COPYABLE(Key))
			memcpy(&pDst->first, &pSrc->first, sizeof(Key));
		else
			scalar_type<Key>::construct(&pDst->first, pSrc->first);

		if (BASISU_IS_BITWISE_COPYABLE(Value))
			memcpy(&pDst->second, &pSrc->second, sizeof(Value));
		else
			scalar_type<Value>::construct(&pDst->second, pSrc->second);
	}
}
3189
3190
// Destructs both halves of a key/value pair in place.
static inline void destruct_value_type(value_type* p)
{
	scalar_type<Key>::destruct(&p->first);
	scalar_type<Value>::destruct(&p->second);
}
3195
3196
// Moves nodes *pSrc to *pDst efficiently from one hashmap to another.
3197
// pDst should NOT be constructed on entry.
3198
static inline void move_node(node* pDst, node* pSrc, bool update_src_state = true)
3199
{
3200
assert(!pDst->state);
3201
3202
if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key) && BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
3203
{
3204
memcpy(pDst, pSrc, sizeof(node));
3205
3206
assert(pDst->state == cStateValid);
3207
}
3208
else
3209
{
3210
if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key))
3211
memcpy(&pDst->first, &pSrc->first, sizeof(Key));
3212
else
3213
{
3214
new ((void*)&pDst->first) Key(std::move(pSrc->first));
3215
scalar_type<Key>::destruct(&pSrc->first);
3216
}
3217
3218
if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
3219
memcpy(&pDst->second, &pSrc->second, sizeof(Value));
3220
else
3221
{
3222
new ((void*)&pDst->second) Value(std::move(pSrc->second));
3223
scalar_type<Value>::destruct(&pSrc->second);
3224
}
3225
3226
pDst->state = cStateValid;
3227
}
3228
3229
if (update_src_state)
3230
pSrc->state = cStateInvalid;
3231
}
3232
3233
struct raw_node
3234
{
3235
inline raw_node()
3236
{
3237
node* p = reinterpret_cast<node*>(this);
3238
p->state = cStateInvalid;
3239
}
3240
3241
// In practice, this should never be called (right?). We manage destruction ourselves.
3242
inline ~raw_node()
3243
{
3244
node* p = reinterpret_cast<node*>(this);
3245
if (p->state)
3246
hash_map_type::destruct_value_type(p);
3247
}
3248
3249
inline raw_node(const raw_node& other)
3250
{
3251
node* pDst = reinterpret_cast<node*>(this);
3252
const node* pSrc = reinterpret_cast<const node*>(&other);
3253
3254
if (pSrc->state)
3255
{
3256
hash_map_type::construct_value_type(pDst, pSrc);
3257
pDst->state = cStateValid;
3258
}
3259
else
3260
pDst->state = cStateInvalid;
3261
}
3262
3263
inline raw_node& operator= (const raw_node& rhs)
3264
{
3265
if (this == &rhs)
3266
return *this;
3267
3268
node* pDst = reinterpret_cast<node*>(this);
3269
const node* pSrc = reinterpret_cast<const node*>(&rhs);
3270
3271
if (pSrc->state)
3272
{
3273
if (pDst->state)
3274
{
3275
pDst->first = pSrc->first;
3276
pDst->second = pSrc->second;
3277
}
3278
else
3279
{
3280
hash_map_type::construct_value_type(pDst, pSrc);
3281
pDst->state = cStateValid;
3282
}
3283
}
3284
else if (pDst->state)
3285
{
3286
hash_map_type::destruct_value_type(pDst);
3287
pDst->state = cStateInvalid;
3288
}
3289
3290
return *this;
3291
}
3292
3293
uint8_t m_bits[sizeof(node)];
3294
};
3295
3296
typedef basisu::vector<raw_node> node_vector;
3297
3298
node_vector m_values;
3299
3300
size_t m_num_valid;
3301
size_t m_grow_threshold;
3302
3303
uint32_t m_hash_shift;
3304
3305
Hasher m_hasher;
3306
Equals m_equals;
3307
3308
// Maps a key to a bucket index via Fibonacci (multiplicative) hashing: the raw
// hash is multiplied by 2^N/phi and the top bits are selected by m_hash_shift,
// so the result is always < m_values.size() (a power of 2).
inline size_t hash_key(const Key& k) const
{
	assert((safe_shift_left(static_cast<uint64_t>(1), (SIZE_T_BITS - m_hash_shift))) == m_values.size());

	if (SIZE_T_BITS == 32)
	{
		assert(m_hash_shift != 32);

		// 2654435769 == 2^32 / golden ratio
		uint32_t h = static_cast<uint32_t>(m_hasher(k));
		h = (2654435769U * h) >> m_hash_shift;

		assert(h < m_values.size());
		return (size_t)h;
	}
	else
	{
		assert(m_hash_shift != 64);

		// 0x9E3779B97F4A7C15 == 2^64 / golden ratio
		uint64_t h = static_cast<uint64_t>(m_hasher(k));
		h = (0x9E3779B97F4A7C15ULL * h) >> m_hash_shift;

		assert(h < m_values.size());
		return (size_t)h;
	}
}
3334
3335
// Reinterprets the raw storage slot at index as a node (const view).
inline const node& get_node(size_t index) const
{
	return *reinterpret_cast<const node*>(&m_values[index]);
}
3339
3340
// Reinterprets the raw storage slot at index as a node (mutable view).
inline node& get_node(size_t index)
{
	return *reinterpret_cast<node*>(&m_values[index]);
}
3344
3345
// Returns the occupancy state of the slot at index.
inline state get_node_state(size_t index) const
{
	return static_cast<state>(get_node(index).state);
}
3349
3350
// Sets the slot's occupancy flag (true -> valid, false -> invalid).
inline void set_node_state(size_t index, bool valid)
{
	get_node(index).state = valid;
}
3354
3355
inline bool try_grow()
3356
{
3357
uint64_t n = m_values.size() * 2ULL;
3358
3359
if (!helpers::is_power_of_2(n))
3360
n = helpers::next_pow2(n);
3361
3362
if (!can_fit_into_size_t(n))
3363
{
3364
assert(0);
3365
return false;
3366
}
3367
3368
return rehash(helpers::maximum<size_t>(cMinHashSize, (size_t)n));
3369
}
3370
3371
// new_hash_size must be a power of 2.
3372
inline bool rehash(size_t new_hash_size)
3373
{
3374
if (!helpers::is_power_of_2((uint64_t)new_hash_size))
3375
{
3376
assert(0);
3377
return false;
3378
}
3379
3380
if (new_hash_size < m_num_valid)
3381
{
3382
assert(0);
3383
return false;
3384
}
3385
3386
if (new_hash_size == m_values.size())
3387
return true;
3388
3389
hash_map new_map;
3390
if (!new_map.m_values.try_resize(new_hash_size))
3391
return false;
3392
3393
new_map.m_hash_shift = SIZE_T_BITS - helpers::floor_log2i((uint64_t)new_hash_size);
3394
assert(new_hash_size == safe_shift_left(static_cast<uint64_t>(1), SIZE_T_BITS - new_map.m_hash_shift));
3395
3396
new_map.m_grow_threshold = std::numeric_limits<size_t>::max();
3397
3398
node* pNode = reinterpret_cast<node*>(m_values.begin());
3399
node* pNode_end = pNode + m_values.size();
3400
3401
while (pNode != pNode_end)
3402
{
3403
if (pNode->state)
3404
{
3405
new_map.move_into(pNode);
3406
3407
if (new_map.m_num_valid == m_num_valid)
3408
break;
3409
}
3410
3411
pNode++;
3412
}
3413
3414
new_map.m_grow_threshold = new_hash_size >> 1U;
3415
if (new_hash_size & 1)
3416
new_map.m_grow_threshold++;
3417
3418
m_values.clear_no_destruction();
3419
m_hash_shift = SIZE_T_BITS;
3420
3421
swap(new_map);
3422
3423
return true;
3424
}
3425
3426
// Returns the index of the first occupied slot strictly after index, or
// m_values.size() when none remain. Passing SIZE_MAX wraps to begin at slot 0.
inline size_t find_next(size_t index) const
{
	index++;

	if (index >= m_values.size())
		return index;

	const node* pNode = &get_node(index);

	for (; ; )
	{
		if (pNode->state)
			break;

		if (++index >= m_values.size())
			break;

		pNode++;
	}

	return index;
}
3448
3449
// Returns the slot index holding key k, or m_values.size() if absent.
// Probing runs downward from the home bucket with wraparound, stopping at the
// first empty slot (the table never contains tombstones).
inline size_t find_index(const Key& k) const
{
	if (m_num_valid)
	{
		size_t index = hash_key(k);
		const node* pNode = &get_node(index);

		if (pNode->state)
		{
			if (m_equals(pNode->first, k))
				return index;

			const size_t orig_index = index;

			for (; ; )
			{
				if (!index)
				{
					// Wrap from slot 0 to the last slot.
					index = m_values.size() - 1;
					pNode = &get_node(index);
				}
				else
				{
					index--;
					pNode--;
				}

				// Scanned the entire table - not present.
				if (index == orig_index)
					break;

				// Empty slot ends the cluster - not present.
				if (!pNode->state)
					break;

				if (m_equals(pNode->first, k))
					return index;
			}
		}
	}

	return m_values.size();
}
3490
3491
// Copy-inserts (k, v) without resizing. Returns false when the table is empty,
// full, or at its growth threshold; returns true with result.second == false
// when the key already exists.
inline bool insert_no_grow(insert_result& result, const Key& k, const Value& v)
{
	if (!m_values.size())
		return false;

	size_t index = hash_key(k);
	node* pNode = &get_node(index);

	if (pNode->state)
	{
		if (m_equals(pNode->first, k))
		{
			// Key already present - report its location.
			result.first = iterator(*this, index);
			result.second = false;
			return true;
		}

		const size_t orig_index = index;

		// Probe downward (with wraparound) for a duplicate or a free slot.
		for (; ; )
		{
			if (!index)
			{
				index = m_values.size() - 1;
				pNode = &get_node(index);
			}
			else
			{
				index--;
				pNode--;
			}

			// Wrapped all the way around - the table is completely full.
			if (orig_index == index)
				return false;

			if (!pNode->state)
				break;

			if (m_equals(pNode->first, k))
			{
				result.first = iterator(*this, index);
				result.second = false;
				return true;
			}
		}
	}

	// Refuse the insert once the load-factor threshold is reached.
	if (m_num_valid >= m_grow_threshold)
		return false;

	construct_value_type(pNode, k, v);

	pNode->state = cStateValid;

	m_num_valid++;
	assert(m_num_valid <= m_values.size());

	result.first = iterator(*this, index);
	result.second = true;

	return true;
}
3553
3554
// Move user supplied key/value into a node.
3555
static inline void move_value_type(value_type* pDst, Key&& k, Value&& v)
3556
{
3557
// Not checking for is MOVABLE because the caller could later destruct k and/or v (what state do we set them to?)
3558
if (BASISU_IS_BITWISE_COPYABLE(Key))
3559
{
3560
memcpy(&pDst->first, &k, sizeof(Key));
3561
}
3562
else
3563
{
3564
new ((void*)&pDst->first) Key(std::move(k));
3565
// No destruction - user will do that (we don't own k).
3566
}
3567
3568
if (BASISU_IS_BITWISE_COPYABLE(Value))
3569
{
3570
memcpy(&pDst->second, &v, sizeof(Value));
3571
}
3572
else
3573
{
3574
new ((void*)&pDst->second) Value(std::move(v));
3575
// No destruction - user will do that (we don't own v).
3576
}
3577
}
3578
3579
// Insert user provided k/v, by moving, into the current hash table
3580
inline bool insert_no_grow_move(insert_result& result, Key&& k, Value&& v)
3581
{
3582
if (!m_values.size())
3583
return false;
3584
3585
size_t index = hash_key(k);
3586
node* pNode = &get_node(index);
3587
3588
if (pNode->state)
3589
{
3590
if (m_equals(pNode->first, k))
3591
{
3592
result.first = iterator(*this, index);
3593
result.second = false;
3594
return true;
3595
}
3596
3597
const size_t orig_index = index;
3598
3599
for (; ; )
3600
{
3601
if (!index)
3602
{
3603
index = m_values.size() - 1;
3604
pNode = &get_node(index);
3605
}
3606
else
3607
{
3608
index--;
3609
pNode--;
3610
}
3611
3612
if (orig_index == index)
3613
return false;
3614
3615
if (!pNode->state)
3616
break;
3617
3618
if (m_equals(pNode->first, k))
3619
{
3620
result.first = iterator(*this, index);
3621
result.second = false;
3622
return true;
3623
}
3624
}
3625
}
3626
3627
if (m_num_valid >= m_grow_threshold)
3628
return false;
3629
3630
move_value_type(pNode, std::move(k), std::move(v));
3631
3632
pNode->state = cStateValid;
3633
3634
m_num_valid++;
3635
assert(m_num_valid <= m_values.size());
3636
3637
result.first = iterator(*this, index);
3638
result.second = true;
3639
3640
return true;
3641
}
3642
3643
// Insert pNode by moving into the current hash table
3644
inline void move_into(node* pNode)
3645
{
3646
size_t index = hash_key(pNode->first);
3647
node* pDst_node = &get_node(index);
3648
3649
if (pDst_node->state)
3650
{
3651
const size_t orig_index = index;
3652
3653
for (; ; )
3654
{
3655
if (!index)
3656
{
3657
index = m_values.size() - 1;
3658
pDst_node = &get_node(index);
3659
}
3660
else
3661
{
3662
index--;
3663
pDst_node--;
3664
}
3665
3666
if (index == orig_index)
3667
{
3668
assert(false);
3669
return;
3670
}
3671
3672
if (!pDst_node->state)
3673
break;
3674
}
3675
}
3676
3677
// No need to update the source node's state (it's going away)
3678
move_node(pDst_node, pNode, false);
3679
3680
m_num_valid++;
3681
}
3682
};
3683
3684
template<typename Key, typename Value, typename Hasher, typename Equals>
3685
struct bitwise_movable< hash_map<Key, Value, Hasher, Equals> > { enum { cFlag = true }; };
3686
3687
#if BASISU_HASHMAP_TEST
3688
extern void hash_map_test();
3689
#endif
3690
3691
// String formatting
// printf-style formatting into a std::string. Output is formatted into a fixed
// 2048-byte stack buffer, so anything longer is silently truncated.
inline std::string string_format(const char* pFmt, ...)
{
	char buf[2048];

	va_list args;
	va_start(args, pFmt);
#ifdef _WIN32
	vsprintf_s(buf, sizeof(buf), pFmt, args);
#else
	// vsnprintf always null-terminates, even when truncating.
	vsnprintf(buf, sizeof(buf), pFmt, args);
#endif
	va_end(args);

	return std::string(buf);
}
3707
3708
// Discriminator tag for fmt_variant's union payload.
enum class variant_type
{
	cInvalid,          // default-constructed / moved-from
	cI32, cU32,        // 32-bit integers
	cI64, cU64,        // 64-bit integers
	cFlt, cDbl, cBool, // floating point and bool
	cStrPtr, cStdStr   // C string pointer vs. owned std::string
};
3716
3717
struct fmt_variant
3718
{
3719
union
3720
{
3721
int32_t m_i32;
3722
uint32_t m_u32;
3723
int64_t m_i64;
3724
uint64_t m_u64;
3725
float m_flt;
3726
double m_dbl;
3727
bool m_bool;
3728
const char* m_pStr;
3729
};
3730
3731
std::string m_str;
3732
3733
variant_type m_type;
3734
3735
inline fmt_variant() :
3736
m_u64(0),
3737
m_type(variant_type::cInvalid)
3738
{
3739
}
3740
3741
inline fmt_variant(const fmt_variant& other) :
3742
m_u64(other.m_u64),
3743
m_str(other.m_str),
3744
m_type(other.m_type)
3745
{
3746
}
3747
3748
inline fmt_variant(fmt_variant&& other) :
3749
m_u64(other.m_u64),
3750
m_str(std::move(other.m_str)),
3751
m_type(other.m_type)
3752
{
3753
other.m_type = variant_type::cInvalid;
3754
other.m_u64 = 0;
3755
}
3756
3757
inline fmt_variant& operator= (fmt_variant&& other)
3758
{
3759
if (this == &other)
3760
return *this;
3761
3762
m_type = other.m_type;
3763
m_u64 = other.m_u64;
3764
m_str = std::move(other.m_str);
3765
3766
other.m_type = variant_type::cInvalid;
3767
other.m_u64 = 0;
3768
3769
return *this;
3770
}
3771
3772
inline fmt_variant& operator= (const fmt_variant& rhs)
3773
{
3774
if (this == &rhs)
3775
return *this;
3776
3777
m_u64 = rhs.m_u64;
3778
m_type = rhs.m_type;
3779
m_str = rhs.m_str;
3780
3781
return *this;
3782
}
3783
3784
inline fmt_variant(int32_t v) : m_i32(v), m_type(variant_type::cI32) { }
3785
inline fmt_variant(uint32_t v) : m_u32(v), m_type(variant_type::cU32) { }
3786
inline fmt_variant(int64_t v) : m_i64(v), m_type(variant_type::cI64) { }
3787
inline fmt_variant(uint64_t v) : m_u64(v), m_type(variant_type::cU64) { }
3788
#ifdef _MSC_VER
3789
inline fmt_variant(unsigned long v) : m_u64(v), m_type(variant_type::cU64) {}
3790
inline fmt_variant(long v) : m_i64(v), m_type(variant_type::cI64) {}
3791
#endif
3792
inline fmt_variant(float v) : m_flt(v), m_type(variant_type::cFlt) { }
3793
inline fmt_variant(double v) : m_dbl(v), m_type(variant_type::cDbl) { }
3794
inline fmt_variant(const char* pStr) : m_pStr(pStr), m_type(variant_type::cStrPtr) { }
3795
inline fmt_variant(const std::string& str) : m_u64(0), m_str(str), m_type(variant_type::cStdStr) { }
3796
inline fmt_variant(bool val) : m_bool(val), m_type(variant_type::cBool) { }
3797
3798
bool to_string(std::string& res, std::string& fmt) const;
3799
};
3800
3801
typedef basisu::vector<fmt_variant> fmt_variant_vec;
3802
3803
bool fmt_variants(std::string& res, const char* pFmt, const fmt_variant_vec& variants);
3804
3805
template <typename... Args>
3806
inline bool fmt_string(std::string& res, const char* pFmt, Args&&... args)
3807
{
3808
return fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... });
3809
}
3810
3811
template <typename... Args>
3812
inline std::string fmt_string(const char* pFmt, Args&&... args)
3813
{
3814
std::string res;
3815
fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... });
3816
return res;
3817
}
3818
3819
template <typename... Args>
3820
inline int fmt_printf(const char* pFmt, Args&&... args)
3821
{
3822
std::string res;
3823
if (!fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... }))
3824
return EOF;
3825
3826
return fputs(res.c_str(), stdout);
3827
}
3828
3829
template <typename... Args>
3830
inline int fmt_fprintf(FILE* pFile, const char* pFmt, Args&&... args)
3831
{
3832
std::string res;
3833
if (!fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... }))
3834
return EOF;
3835
3836
return fputs(res.c_str(), pFile);
3837
}
3838
3839
// fixed_array - zero initialized by default, operator[] is always bounds checked.
3840
template <std::size_t N, typename T>
3841
class fixed_array
3842
{
3843
static_assert(N >= 1, "fixed_array size must be at least 1");
3844
3845
public:
3846
using value_type = T;
3847
using size_type = std::size_t;
3848
using difference_type = std::ptrdiff_t;
3849
using reference = T&;
3850
using const_reference = const T&;
3851
using pointer = T*;
3852
using const_pointer = const T*;
3853
using iterator = T*;
3854
using const_iterator = const T*;
3855
3856
T m_data[N];
3857
3858
BASISU_FORCE_INLINE fixed_array()
3859
{
3860
initialize_array();
3861
}
3862
3863
BASISU_FORCE_INLINE fixed_array(std::initializer_list<T> list)
3864
{
3865
assert(list.size() <= N);
3866
3867
std::size_t copy_size = std::min(list.size(), N);
3868
std::copy_n(list.begin(), copy_size, m_data); // Copy up to min(list.size(), N)
3869
3870
if (list.size() < N)
3871
{
3872
// Initialize the rest of the array
3873
std::fill(m_data + copy_size, m_data + N, T{});
3874
}
3875
}
3876
3877
BASISU_FORCE_INLINE T& operator[](std::size_t index)
3878
{
3879
if (index >= N)
3880
container_abort("fixed_array: Index out of bounds.");
3881
return m_data[index];
3882
}
3883
3884
BASISU_FORCE_INLINE const T& operator[](std::size_t index) const
3885
{
3886
if (index >= N)
3887
container_abort("fixed_array: Index out of bounds.");
3888
return m_data[index];
3889
}
3890
3891
BASISU_FORCE_INLINE T* begin() { return m_data; }
3892
BASISU_FORCE_INLINE const T* begin() const { return m_data; }
3893
3894
BASISU_FORCE_INLINE T* end() { return m_data + N; }
3895
BASISU_FORCE_INLINE const T* end() const { return m_data + N; }
3896
3897
BASISU_FORCE_INLINE const T* data() const { return m_data; }
3898
BASISU_FORCE_INLINE T* data() { return m_data; }
3899
3900
BASISU_FORCE_INLINE const T& front() const { return m_data[0]; }
3901
BASISU_FORCE_INLINE T& front() { return m_data[0]; }
3902
3903
BASISU_FORCE_INLINE const T& back() const { return m_data[N - 1]; }
3904
BASISU_FORCE_INLINE T& back() { return m_data[N - 1]; }
3905
3906
BASISU_FORCE_INLINE constexpr std::size_t size() const { return N; }
3907
3908
BASISU_FORCE_INLINE void clear()
3909
{
3910
initialize_array(); // Reinitialize the array
3911
}
3912
3913
BASISU_FORCE_INLINE void set_all(const T& value)
3914
{
3915
std::fill(m_data, m_data + N, value);
3916
}
3917
3918
BASISU_FORCE_INLINE readable_span<T> get_readable_span() const
3919
{
3920
return readable_span<T>(m_data, N);
3921
}
3922
3923
BASISU_FORCE_INLINE writable_span<T> get_writable_span()
3924
{
3925
return writable_span<T>(m_data, N);
3926
}
3927
3928
private:
3929
BASISU_FORCE_INLINE void initialize_array()
3930
{
3931
if constexpr (std::is_integral<T>::value || std::is_floating_point<T>::value)
3932
memset(m_data, 0, sizeof(m_data));
3933
else
3934
std::fill(m_data, m_data + N, T{});
3935
}
3936
3937
BASISU_FORCE_INLINE T& access_element(std::size_t index)
3938
{
3939
if (index >= N)
3940
container_abort("fixed_array: Index out of bounds.");
3941
return m_data[index];
3942
}
3943
3944
BASISU_FORCE_INLINE const T& access_element(std::size_t index) const
3945
{
3946
if (index >= N)
3947
container_abort("fixed_array: Index out of bounds.");
3948
return m_data[index];
3949
}
3950
};
3951
3952
// 2D array
3953
3954
template<typename T>
3955
class vector2D
3956
{
3957
typedef basisu::vector<T> vec_type;
3958
3959
uint32_t m_width, m_height;
3960
vec_type m_values;
3961
3962
public:
3963
vector2D() :
3964
m_width(0),
3965
m_height(0)
3966
{
3967
}
3968
3969
vector2D(uint32_t w, uint32_t h) :
3970
m_width(0),
3971
m_height(0)
3972
{
3973
resize(w, h);
3974
}
3975
3976
vector2D(const vector2D& other)
3977
{
3978
*this = other;
3979
}
3980
3981
vector2D(vector2D&& other) :
3982
m_width(0),
3983
m_height(0)
3984
{
3985
*this = std::move(other);
3986
}
3987
3988
vector2D& operator= (const vector2D& other)
3989
{
3990
if (this != &other)
3991
{
3992
m_width = other.m_width;
3993
m_height = other.m_height;
3994
m_values = other.m_values;
3995
}
3996
return *this;
3997
}
3998
3999
vector2D& operator= (vector2D&& other)
4000
{
4001
if (this != &other)
4002
{
4003
m_width = other.m_width;
4004
m_height = other.m_height;
4005
m_values = std::move(other.m_values);
4006
4007
other.m_width = 0;
4008
other.m_height = 0;
4009
}
4010
return *this;
4011
}
4012
4013
inline bool operator== (const vector2D& rhs) const
4014
{
4015
return (m_width == rhs.m_width) && (m_height == rhs.m_height) && (m_values == rhs.m_values);
4016
}
4017
4018
inline size_t size_in_bytes() const { return m_values.size_in_bytes(); }
4019
4020
inline uint32_t get_width() const { return m_width; }
4021
inline uint32_t get_height() const { return m_height; }
4022
4023
inline const T& operator() (uint32_t x, uint32_t y) const { assert(x < m_width && y < m_height); return m_values[x + y * m_width]; }
4024
inline T& operator() (uint32_t x, uint32_t y) { assert(x < m_width && y < m_height); return m_values[x + y * m_width]; }
4025
4026
inline size_t size() const { return m_values.size(); }
4027
4028
inline const T& operator[] (uint32_t i) const { return m_values[i]; }
4029
inline T& operator[] (uint32_t i) { return m_values[i]; }
4030
4031
inline const T& at_clamped(int x, int y) const { return (*this)(clamp<int>(x, 0, m_width - 1), clamp<int>(y, 0, m_height - 1)); }
4032
inline T& at_clamped(int x, int y) { return (*this)(clamp<int>(x, 0, m_width - 1), clamp<int>(y, 0, m_height - 1)); }
4033
4034
void clear()
4035
{
4036
m_width = 0;
4037
m_height = 0;
4038
m_values.clear();
4039
}
4040
4041
void set_all(const T& val)
4042
{
4043
vector_set_all(m_values, val);
4044
}
4045
4046
inline const T* get_ptr() const { return m_values.data(); }
4047
inline T* get_ptr() { return m_values.data(); }
4048
4049
vector2D& resize(uint32_t new_width, uint32_t new_height)
4050
{
4051
if ((m_width == new_width) && (m_height == new_height))
4052
return *this;
4053
4054
const uint64_t total_vals = (uint64_t)new_width * new_height;
4055
4056
if (!can_fit_into_size_t(total_vals))
4057
{
4058
// What can we do?
4059
assert(0);
4060
return *this;
4061
}
4062
4063
vec_type oldVals((size_t)total_vals);
4064
oldVals.swap(m_values);
4065
4066
const uint32_t w = minimum(m_width, new_width);
4067
const uint32_t h = minimum(m_height, new_height);
4068
4069
if ((w) && (h))
4070
{
4071
for (uint32_t y = 0; y < h; y++)
4072
for (uint32_t x = 0; x < w; x++)
4073
m_values[x + y * new_width] = oldVals[x + y * m_width];
4074
}
4075
4076
m_width = new_width;
4077
m_height = new_height;
4078
4079
return *this;
4080
}
4081
4082
bool try_resize(uint32_t new_width, uint32_t new_height)
4083
{
4084
if ((m_width == new_width) && (m_height == new_height))
4085
return true;
4086
4087
const uint64_t total_vals = (uint64_t)new_width * new_height;
4088
4089
if (!can_fit_into_size_t(total_vals))
4090
{
4091
// What can we do?
4092
assert(0);
4093
return false;
4094
}
4095
4096
vec_type oldVals;
4097
if (!oldVals.try_resize((size_t)total_vals))
4098
return false;
4099
4100
oldVals.swap(m_values);
4101
4102
const uint32_t w = minimum(m_width, new_width);
4103
const uint32_t h = minimum(m_height, new_height);
4104
4105
if ((w) && (h))
4106
{
4107
for (uint32_t y = 0; y < h; y++)
4108
for (uint32_t x = 0; x < w; x++)
4109
m_values[x + y * new_width] = oldVals[x + y * m_width];
4110
}
4111
4112
m_width = new_width;
4113
m_height = new_height;
4114
4115
return true;
4116
}
4117
4118
const vector2D& extract_block_clamped(T* pDst, uint32_t src_x, uint32_t src_y, uint32_t w, uint32_t h) const
4119
{
4120
// HACK HACK
4121
if (((src_x + w) > m_width) || ((src_y + h) > m_height))
4122
{
4123
// Slower clamping case
4124
for (uint32_t y = 0; y < h; y++)
4125
for (uint32_t x = 0; x < w; x++)
4126
*pDst++ = at_clamped(src_x + x, src_y + y);
4127
}
4128
else
4129
{
4130
const T* pSrc = &m_values[src_x + src_y * m_width];
4131
4132
for (uint32_t y = 0; y < h; y++)
4133
{
4134
memcpy(pDst, pSrc, w * sizeof(T));
4135
pSrc += m_width;
4136
pDst += w;
4137
}
4138
}
4139
4140
return *this;
4141
}
4142
};
4143
4144
} // namespace basisu
4145
4146
namespace std
4147
{
4148
template<typename T>
4149
inline void swap(basisu::vector<T>& a, basisu::vector<T>& b)
4150
{
4151
a.swap(b);
4152
}
4153
4154
template<typename Key, typename Value, typename Hasher, typename Equals>
4155
inline void swap(basisu::hash_map<Key, Value, Hasher, Equals>& a, basisu::hash_map<Key, Value, Hasher, Equals>& b)
4156
{
4157
a.swap(b);
4158
}
4159
4160
} // namespace std
4161
4162