// GitHub Repository: godotengine/godot
// Path: blob/master/thirdparty/basis_universal/transcoder/basisu_containers.h
// basisu_containers.h
#pragma once

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <string.h>
#include <algorithm>
#include <new>
#include <utility>
#include <type_traits>
#include <initializer_list>

#if defined(__linux__) && !defined(ANDROID)
// Only for malloc_usable_size() in basisu_containers_impl.h
#include <malloc.h>
#define HAS_MALLOC_USABLE_SIZE 1
#endif

// Set to 1 to always check vector operator[], front(), and back() even in release.
#define BASISU_VECTOR_FORCE_CHECKING 0

// If 1, the vector container will not query the CRT to get the size of resized memory blocks.
#define BASISU_VECTOR_DETERMINISTIC 1

#ifdef _MSC_VER
#define BASISU_FORCE_INLINE __forceinline
#else
#define BASISU_FORCE_INLINE inline
#endif

#define BASISU_HASHMAP_TEST 0

namespace basisu
{
    enum { cInvalidIndex = -1 };

    template <typename S> inline S clamp(S value, S low, S high) { return (value < low) ? low : ((value > high) ? high : value); }

    template <typename S> inline S maximum(S a, S b) { return (a > b) ? a : b; }
    template <typename S> inline S maximum(S a, S b, S c) { return maximum(maximum(a, b), c); }
    template <typename S> inline S maximum(S a, S b, S c, S d) { return maximum(maximum(maximum(a, b), c), d); }

    template <typename S> inline S minimum(S a, S b) { return (a < b) ? a : b; }
    template <typename S> inline S minimum(S a, S b, S c) { return minimum(minimum(a, b), c); }
    template <typename S> inline S minimum(S a, S b, S c, S d) { return minimum(minimum(minimum(a, b), c), d); }

#ifdef _MSC_VER
    __declspec(noreturn)
#else
    [[noreturn]]
#endif
    void container_abort(const char* pMsg, ...);

    namespace helpers
    {
        inline bool is_power_of_2(uint32_t x) { return x && ((x & (x - 1U)) == 0U); }
        inline bool is_power_of_2(uint64_t x) { return x && ((x & (x - 1U)) == 0U); }

        template<class T> const T& minimum(const T& a, const T& b) { return (b < a) ? b : a; }
        template<class T> const T& maximum(const T& a, const T& b) { return (a < b) ? b : a; }

        inline uint32_t floor_log2i(uint32_t v)
        {
            uint32_t l = 0;
            while (v > 1U)
            {
                v >>= 1;
                l++;
            }
            return l;
        }

        inline uint32_t floor_log2i(uint64_t v)
        {
            uint32_t l = 0;
            while (v > 1U)
            {
                v >>= 1;
                l++;
            }
            return l;
        }

        inline uint32_t next_pow2(uint32_t val)
        {
            val--;
            val |= val >> 16;
            val |= val >> 8;
            val |= val >> 4;
            val |= val >> 2;
            val |= val >> 1;
            return val + 1;
        }

        inline uint64_t next_pow2(uint64_t val)
        {
            val--;
            val |= val >> 32;
            val |= val >> 16;
            val |= val >> 8;
            val |= val >> 4;
            val |= val >> 2;
            val |= val >> 1;
            return val + 1;
        }
    } // namespace helpers

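    // Illustrative sketch (not part of the upstream header): how the two helpers above
    // relate. floor_log2i() returns the index of the highest set bit, and next_pow2()
    // rounds up to a power of two by smearing the high bit into every lower bit before
    // adding one. Deliberately guarded out of the build.
#if 0
    static void helpers_example()
    {
        assert(helpers::floor_log2i(1U) == 0);
        assert(helpers::floor_log2i(1000U) == 9);      // 2^9 = 512 <= 1000 < 1024

        assert(helpers::next_pow2(1000U) == 1024U);    // rounds up
        assert(helpers::next_pow2(1024U) == 1024U);    // powers of two map to themselves

        assert(helpers::is_power_of_2(helpers::next_pow2(12345U)));
    }
#endif
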
    template <typename T>
    inline T* construct(T* p)
    {
        return new (static_cast<void*>(p)) T;
    }

    template <typename T, typename U>
    inline T* construct(T* p, const U& init)
    {
        return new (static_cast<void*>(p)) T(init);
    }

    template <typename T>
    inline void construct_array(T* p, size_t n)
    {
        T* q = p + n;
        for (; p != q; ++p)
            new (static_cast<void*>(p)) T;
    }

    template <typename T, typename U>
    inline void construct_array(T* p, size_t n, const U& init)
    {
        T* q = p + n;
        for (; p != q; ++p)
            new (static_cast<void*>(p)) T(init);
    }

    template <typename T>
    inline void destruct(T* p)
    {
        p->~T();
    }

    template <typename T> inline void destruct_array(T* p, size_t n)
    {
        T* q = p + n;
        for (; p != q; ++p)
            p->~T();
    }

    template<typename T>
    struct scalar_type
    {
        enum { cFlag = false };
        static inline void construct(T* p) { basisu::construct(p); }
        static inline void construct(T* p, const T& init) { basisu::construct(p, init); }
        static inline void construct_array(T* p, size_t n) { basisu::construct_array(p, n); }
        static inline void destruct(T* p) { basisu::destruct(p); }
        static inline void destruct_array(T* p, size_t n) { basisu::destruct_array(p, n); }
    };

    template<typename T> struct scalar_type<T*>
    {
        enum { cFlag = true };
        static inline void construct(T** p) { memset(p, 0, sizeof(T*)); }
        static inline void construct(T** p, T* init) { *p = init; }
        static inline void construct_array(T** p, size_t n) { memset(p, 0, sizeof(T*) * n); }
        static inline void destruct(T** p) { (void)p; }
        static inline void destruct_array(T** p, size_t n) { (void)p; (void)n; }
    };

#define BASISU_DEFINE_BUILT_IN_TYPE(X) \
    template<> struct scalar_type<X> { \
        enum { cFlag = true }; \
        static inline void construct(X* p) { memset(p, 0, sizeof(X)); } \
        static inline void construct(X* p, const X& init) { memcpy(p, &init, sizeof(X)); } \
        static inline void construct_array(X* p, size_t n) { memset(p, 0, sizeof(X) * n); } \
        static inline void destruct(X* p) { (void)p; } \
        static inline void destruct_array(X* p, size_t n) { (void)p; (void)n; } };

    BASISU_DEFINE_BUILT_IN_TYPE(bool)
    BASISU_DEFINE_BUILT_IN_TYPE(char)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned char)
    BASISU_DEFINE_BUILT_IN_TYPE(short)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned short)
    BASISU_DEFINE_BUILT_IN_TYPE(int)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned int)
    BASISU_DEFINE_BUILT_IN_TYPE(long)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned long)
#ifdef __GNUC__
    BASISU_DEFINE_BUILT_IN_TYPE(long long)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned long long)
#else
    BASISU_DEFINE_BUILT_IN_TYPE(__int64)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned __int64)
#endif
    BASISU_DEFINE_BUILT_IN_TYPE(float)
    BASISU_DEFINE_BUILT_IN_TYPE(double)
    BASISU_DEFINE_BUILT_IN_TYPE(long double)

#undef BASISU_DEFINE_BUILT_IN_TYPE

    template<typename T>
    struct bitwise_movable { enum { cFlag = false }; };

#define BASISU_DEFINE_BITWISE_MOVABLE(Q) template<> struct bitwise_movable<Q> { enum { cFlag = true }; };

    template<typename T>
    struct bitwise_copyable { enum { cFlag = false }; };

#define BASISU_DEFINE_BITWISE_COPYABLE(Q) template<> struct bitwise_copyable<Q> { enum { cFlag = true }; };

#define BASISU_IS_POD(T) __is_pod(T)

#define BASISU_IS_SCALAR_TYPE(T) (scalar_type<T>::cFlag)

#if !defined(BASISU_HAVE_STD_TRIVIALLY_COPYABLE) && defined(__GNUC__) && (__GNUC__ < 5)
// std::is_trivially_copyable is unavailable before GCC 5; fall back to the legacy compiler builtin.
#define BASISU_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
#else
#define BASISU_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
#endif

// TODO: clean this up, it's still confusing (copying vs. movable).
#define BASISU_IS_BITWISE_COPYABLE(T) (BASISU_IS_SCALAR_TYPE(T) || BASISU_IS_POD(T) || BASISU_IS_TRIVIALLY_COPYABLE(T) || std::is_trivial<T>::value || (bitwise_copyable<T>::cFlag))

#define BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) (BASISU_IS_BITWISE_COPYABLE(T) || (bitwise_movable<T>::cFlag))

#define BASISU_HAS_DESTRUCTOR(T) ((!scalar_type<T>::cFlag) && (!__is_pod(T)) && (!std::is_trivially_destructible<T>::value))

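    // Illustrative sketch (not part of the upstream header): a user-defined POD-like
    // struct can opt in to the fast memcpy()/memmove() container paths by specializing
    // the traits above through these macros. The struct name is hypothetical.
#if 0
    struct rgba8 { uint8_t r, g, b, a; };

    // Tell the containers that rgba8 may be copied and relocated bytewise, so
    // vector<rgba8> resizes with memcpy()/memmove() instead of per-element moves.
    BASISU_DEFINE_BITWISE_COPYABLE(rgba8)
    BASISU_DEFINE_BITWISE_MOVABLE(rgba8)

    static_assert(BASISU_IS_BITWISE_COPYABLE(rgba8), "rgba8 should take the bitwise path");
#endif
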
    typedef char(&yes_t)[1];
    typedef char(&no_t)[2];

    template <class U> yes_t class_test(int U::*);
    template <class U> no_t class_test(...);

    template <class T> struct is_class
    {
        enum { value = (sizeof(class_test<T>(0)) == sizeof(yes_t)) };
    };

    template <typename T> struct is_pointer
    {
        enum { value = false };
    };

    template <typename T> struct is_pointer<T*>
    {
        enum { value = true };
    };

    struct empty_type { };

    BASISU_DEFINE_BITWISE_COPYABLE(empty_type);
    BASISU_DEFINE_BITWISE_MOVABLE(empty_type);

    template<typename T> struct rel_ops
    {
        friend bool operator!=(const T& x, const T& y) { return (!(x == y)); }
        friend bool operator> (const T& x, const T& y) { return (y < x); }
        friend bool operator<=(const T& x, const T& y) { return (!(y < x)); }
        friend bool operator>=(const T& x, const T& y) { return (!(x < y)); }
    };

    struct elemental_vector
    {
        void* m_p;
        size_t m_size;
        size_t m_capacity;

        typedef void (*object_mover)(void* pDst, void* pSrc, size_t num);

        bool increase_capacity(size_t min_new_capacity, bool grow_hint, size_t element_size, object_mover pRelocate, bool nofail);
    };

    // Returns true if a+b would overflow a size_t.
    inline bool add_overflow_check(size_t a, size_t b)
    {
        size_t c = a + b;
        return c < a;
    }

    // Returns false on overflow, true if OK.
    template<typename T>
    inline bool can_fit_into_size_t(T val)
    {
        static_assert(std::is_integral<T>::value, "T must be an integral type");

        return (val >= 0) && (static_cast<size_t>(val) == val);
    }

    // Returns true if a*b would overflow a size_t.
    inline bool mul_overflow_check(size_t a, size_t b)
    {
        // Avoid the division on 32-bit platforms
        if (sizeof(size_t) == sizeof(uint32_t))
            return !can_fit_into_size_t(static_cast<uint64_t>(a) * b);
        else
            return b && (a > (SIZE_MAX / b));
    }

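    // Illustrative sketch (not part of the upstream header): the intended use of the
    // overflow checks above when sizing an allocation. The function below is
    // hypothetical; it mirrors how the containers guard size arithmetic before
    // calling malloc()/realloc(). Deliberately guarded out of the build.
#if 0
    inline void* checked_alloc_array(size_t num_elements, size_t element_size)
    {
        // Reject num_elements * element_size if the byte count can't fit in a size_t.
        if (mul_overflow_check(num_elements, element_size))
            return nullptr;

        const size_t num_bytes = num_elements * element_size;

        // Reject num_bytes + 16 if adding header/padding space would wrap around.
        if (add_overflow_check(num_bytes, 16))
            return nullptr;

        return malloc(num_bytes + 16);
    }
#endif
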
    template<typename T>
    class writable_span;

    template<typename T>
    class readable_span
    {
    public:
        using value_type = T;
        using size_type = size_t;
        using const_pointer = const T*;
        using const_reference = const T&;
        using const_iterator = const T*;

        inline readable_span() :
            m_p(nullptr),
            m_size(0)
        {
        }

        inline readable_span(const writable_span<T>& other);
        inline readable_span& operator= (const writable_span<T>& rhs);

        inline readable_span(const_pointer p, size_t n)
        {
            set(p, n);
        }

        inline readable_span(const_pointer s, const_pointer e)
        {
            set(s, e);
        }

        inline readable_span(const readable_span& other) :
            m_p(other.m_p),
            m_size(other.m_size)
        {
            assert(!m_size || m_p);
        }

        inline readable_span(readable_span&& other) :
            m_p(other.m_p),
            m_size(other.m_size)
        {
            assert(!m_size || m_p);

            other.m_p = nullptr;
            other.m_size = 0;
        }

        template <size_t N>
        inline readable_span(const T(&arr)[N]) :
            m_p(arr),
            m_size(N)
        {
        }

        template <size_t N>
        inline readable_span& set(const T(&arr)[N])
        {
            m_p = arr;
            m_size = N;
            return *this;
        }

        inline readable_span& set(const_pointer p, size_t n)
        {
            if (!p && n)
            {
                assert(0);
                m_p = nullptr;
                m_size = 0;
            }
            else
            {
                m_p = p;
                m_size = n;
            }

            return *this;
        }

        inline readable_span& set(const_pointer s, const_pointer e)
        {
            if ((e < s) || (!s && e))
            {
                assert(0);
                m_p = nullptr;
                m_size = 0;
            }
            else
            {
                m_p = s;
                m_size = e - s;
            }

            return *this;
        }

        inline bool operator== (const readable_span& rhs) const
        {
            return (m_p == rhs.m_p) && (m_size == rhs.m_size);
        }

        inline bool operator!= (const readable_span& rhs) const
        {
            return (m_p != rhs.m_p) || (m_size != rhs.m_size);
        }

        // only true if the region is totally inside the span
        inline bool is_inside_ptr(const_pointer p, size_t n) const
        {
            if (!is_valid())
            {
                assert(0);
                return false;
            }

            if (!p)
            {
                assert(!n);
                return false;
            }

            return (p >= m_p) && ((p + n) <= end());
        }

        inline bool is_inside(size_t ofs, size_t size) const
        {
            if (add_overflow_check(ofs, size))
            {
                assert(0);
                return false;
            }

            if (!is_valid())
            {
                assert(0);
                return false;
            }

            if ((ofs + size) > m_size)
                return false;

            return true;
        }

        inline readable_span subspan(size_t ofs, size_t n) const
        {
            if (!is_valid())
            {
                assert(0);
                return readable_span((const_pointer)nullptr, (size_t)0);
            }

            if (add_overflow_check(ofs, n))
            {
                assert(0);
                return readable_span((const_pointer)nullptr, (size_t)0);
            }

            if ((ofs + n) > m_size)
            {
                assert(0);
                return readable_span((const_pointer)nullptr, (size_t)0);
            }

            return readable_span(m_p + ofs, n);
        }

        void clear()
        {
            m_p = nullptr;
            m_size = 0;
        }

        inline bool empty() const { return !m_size; }

        // true if the span is non-nullptr and is not empty
        inline bool is_valid() const { return m_p && m_size; }

        inline bool is_nullptr() const { return m_p == nullptr; }

        inline size_t size() const { return m_size; }
        inline size_t size_in_bytes() const { assert(can_fit_into_size_t((uint64_t)m_size * sizeof(T))); return m_size * sizeof(T); }

        inline const_pointer get_ptr() const { return m_p; }

        inline const_iterator begin() const { return m_p; }
        inline const_iterator end() const { assert(m_p || !m_size); return m_p + m_size; }

        inline const_iterator cbegin() const { return m_p; }
        inline const_iterator cend() const { assert(m_p || !m_size); return m_p + m_size; }

        inline const_reference front() const
        {
            if (!(m_p && m_size))
                container_abort("readable_span invalid\n");

            return m_p[0];
        }

        inline const_reference back() const
        {
            if (!(m_p && m_size))
                container_abort("readable_span invalid\n");

            return m_p[m_size - 1];
        }

        inline readable_span& operator= (const readable_span& rhs)
        {
            m_p = rhs.m_p;
            m_size = rhs.m_size;
            return *this;
        }

        inline readable_span& operator= (readable_span&& rhs)
        {
            if (this != &rhs)
            {
                m_p = rhs.m_p;
                m_size = rhs.m_size;
                rhs.m_p = nullptr;
                rhs.m_size = 0;
            }

            return *this;
        }

        inline const_reference operator* () const
        {
            if (!(m_p && m_size))
                container_abort("readable_span invalid\n");

            return *m_p;
        }

        inline const_pointer operator-> () const
        {
            if (!(m_p && m_size))
                container_abort("readable_span invalid\n");

            return m_p;
        }

        inline readable_span& remove_prefix(size_t n)
        {
            if ((!m_p) || (n > m_size))
            {
                assert(0);
                return *this;
            }

            m_p += n;
            m_size -= n;
            return *this;
        }

        inline readable_span& remove_suffix(size_t n)
        {
            if ((!m_p) || (n > m_size))
            {
                assert(0);
                return *this;
            }

            m_size -= n;
            return *this;
        }

        inline readable_span& enlarge(size_t n)
        {
            if (!m_p)
            {
                assert(0);
                return *this;
            }

            if (add_overflow_check(m_size, n))
            {
                assert(0);
                return *this;
            }

            m_size += n;
            return *this;
        }

        bool copy_from(size_t src_ofs, size_t src_size, T* pDst, size_t dst_ofs) const
        {
            if (!src_size)
                return true;

            if (!pDst)
            {
                assert(0);
                return false;
            }

            if (!is_inside(src_ofs, src_size))
            {
                assert(0);
                return false;
            }

            const_pointer pS = m_p + src_ofs;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                const uint64_t num_bytes = (uint64_t)src_size * sizeof(T);

                if (!can_fit_into_size_t(num_bytes))
                {
                    assert(0);
                    return false;
                }

                memcpy(pDst + dst_ofs, pS, (size_t)num_bytes);
            }
            else
            {
                T* pD = pDst + dst_ofs;
                T* pDst_end = pD + src_size;

                while (pD != pDst_end)
                    *pD++ = *pS++;
            }

            return true;
        }

        inline const_reference operator[] (size_t idx) const
        {
            if ((!is_valid()) || (idx >= m_size))
                container_abort("readable_span: invalid span or index\n");

            return m_p[idx];
        }

        inline uint16_t read_le16(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint16_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t a = (uint8_t)m_p[ofs];
            const uint8_t b = (uint8_t)m_p[ofs + 1];
            return a | (b << 8u);
        }

        template<typename R>
        inline R read_val(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(R)))
            {
                assert(0);
                return (R)0;
            }

            return *reinterpret_cast<const R*>(&m_p[ofs]);
        }

        inline uint16_t read_be16(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint16_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t b = (uint8_t)m_p[ofs];
            const uint8_t a = (uint8_t)m_p[ofs + 1];
            return a | (b << 8u);
        }

        inline uint32_t read_le32(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint32_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t a = (uint8_t)m_p[ofs];
            const uint8_t b = (uint8_t)m_p[ofs + 1];
            const uint8_t c = (uint8_t)m_p[ofs + 2];
            const uint8_t d = (uint8_t)m_p[ofs + 3];
            return a | (b << 8u) | (c << 16u) | (d << 24u);
        }

        inline uint32_t read_be32(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint32_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t d = (uint8_t)m_p[ofs];
            const uint8_t c = (uint8_t)m_p[ofs + 1];
            const uint8_t b = (uint8_t)m_p[ofs + 2];
            const uint8_t a = (uint8_t)m_p[ofs + 3];
            return a | (b << 8u) | (c << 16u) | (d << 24u);
        }

        inline uint64_t read_le64(size_t ofs) const
        {
            if (add_overflow_check(ofs, sizeof(uint64_t)))
            {
                assert(0);
                return 0;
            }
            const uint64_t l = read_le32(ofs);
            const uint64_t h = read_le32(ofs + sizeof(uint32_t));
            return l | (h << 32u);
        }

        inline uint64_t read_be64(size_t ofs) const
        {
            if (add_overflow_check(ofs, sizeof(uint64_t)))
            {
                assert(0);
                return 0;
            }
            const uint64_t h = read_be32(ofs);
            const uint64_t l = read_be32(ofs + sizeof(uint32_t));
            return l | (h << 32u);
        }

    private:
        const_pointer m_p;
        size_t m_size;
    };

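    // Illustrative sketch (not part of the upstream header): bounds-checked parsing of
    // a little-endian byte stream with readable_span. Unlike raw pointer arithmetic,
    // out-of-range offsets fail the is_inside() test and return 0 instead of reading
    // past the buffer. The byte values are made up. Deliberately guarded out of the build.
#if 0
    static void readable_span_example()
    {
        const uint8_t file_data[8] = { 0x42, 0x55, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00 };

        basisu::readable_span<uint8_t> hdr(file_data, sizeof(file_data));

        const uint16_t magic   = hdr.read_le16(0); // 0x5542
        const uint16_t version = hdr.read_le16(2); // 0x0001
        const uint32_t length  = hdr.read_le32(4); // 0x00000010

        // A 4-byte read at offset 6 would run off the end: returns 0 (and asserts in debug).
        const uint32_t bad = hdr.read_le32(6);

        (void)magic; (void)version; (void)length; (void)bad;
    }
#endif
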
    template<typename T>
    class writable_span
    {
        friend readable_span<T>;

    public:
        using value_type = T;
        using size_type = size_t;
        using const_pointer = const T*;
        using const_reference = const T&;
        using const_iterator = const T*;
        using pointer = T*;
        using reference = T&;
        using iterator = T*;

        inline writable_span() :
            m_p(nullptr),
            m_size(0)
        {
        }

        inline writable_span(T* p, size_t n)
        {
            set(p, n);
        }

        inline writable_span(T* s, T* e)
        {
            set(s, e);
        }

        inline writable_span(const writable_span& other) :
            m_p(other.m_p),
            m_size(other.m_size)
        {
            assert(!m_size || m_p);
        }

        inline writable_span(writable_span&& other) :
            m_p(other.m_p),
            m_size(other.m_size)
        {
            assert(!m_size || m_p);

            other.m_p = nullptr;
            other.m_size = 0;
        }

        template <size_t N>
        inline writable_span(T(&arr)[N]) :
            m_p(arr),
            m_size(N)
        {
        }

        readable_span<T> get_readable_span() const
        {
            return readable_span<T>(m_p, m_size);
        }

        template <size_t N>
        inline writable_span& set(T(&arr)[N])
        {
            m_p = arr;
            m_size = N;
            return *this;
        }

        inline writable_span& set(T* p, size_t n)
        {
            if (!p && n)
            {
                assert(0);
                m_p = nullptr;
                m_size = 0;
            }
            else
            {
                m_p = p;
                m_size = n;
            }

            return *this;
        }

        inline writable_span& set(T* s, T* e)
        {
            if ((e < s) || (!s && e))
            {
                assert(0);
                m_p = nullptr;
                m_size = 0;
            }
            else
            {
                m_p = s;
                m_size = e - s;
            }

            return *this;
        }

        inline bool operator== (const writable_span& rhs) const
        {
            return (m_p == rhs.m_p) && (m_size == rhs.m_size);
        }

        inline bool operator== (const readable_span<T>& rhs) const
        {
            return (m_p == rhs.m_p) && (m_size == rhs.m_size);
        }

        inline bool operator!= (const writable_span& rhs) const
        {
            return (m_p != rhs.m_p) || (m_size != rhs.m_size);
        }

        inline bool operator!= (const readable_span<T>& rhs) const
        {
            return (m_p != rhs.m_p) || (m_size != rhs.m_size);
        }

        // only true if the region is totally inside the span
        inline bool is_inside_ptr(const_pointer p, size_t n) const
        {
            if (!is_valid())
            {
                assert(0);
                return false;
            }

            if (!p)
            {
                assert(!n);
                return false;
            }

            return (p >= m_p) && ((p + n) <= end());
        }

        inline bool is_inside(size_t ofs, size_t size) const
        {
            if (add_overflow_check(ofs, size))
            {
                assert(0);
                return false;
            }

            if (!is_valid())
            {
                assert(0);
                return false;
            }

            if ((ofs + size) > m_size)
                return false;

            return true;
        }

        inline writable_span subspan(size_t ofs, size_t n) const
        {
            if (!is_valid())
            {
                assert(0);
                return writable_span((T*)nullptr, (size_t)0);
            }

            if (add_overflow_check(ofs, n))
            {
                assert(0);
                return writable_span((T*)nullptr, (size_t)0);
            }

            if ((ofs + n) > m_size)
            {
                assert(0);
                return writable_span((T*)nullptr, (size_t)0);
            }

            return writable_span(m_p + ofs, n);
        }

        void clear()
        {
            m_p = nullptr;
            m_size = 0;
        }

        inline bool empty() const { return !m_size; }

        // true if the span is non-nullptr and is not empty
        inline bool is_valid() const { return m_p && m_size; }

        inline bool is_nullptr() const { return m_p == nullptr; }

        inline size_t size() const { return m_size; }
        inline size_t size_in_bytes() const { assert(can_fit_into_size_t((uint64_t)m_size * sizeof(T))); return m_size * sizeof(T); }

        inline T* get_ptr() const { return m_p; }

        inline iterator begin() const { return m_p; }
        inline iterator end() const { assert(m_p || !m_size); return m_p + m_size; }

        inline const_iterator cbegin() const { return m_p; }
        inline const_iterator cend() const { assert(m_p || !m_size); return m_p + m_size; }

        inline T& front() const
        {
            if (!(m_p && m_size))
                container_abort("writable_span invalid\n");

            return m_p[0];
        }

        inline T& back() const
        {
            if (!(m_p && m_size))
                container_abort("writable_span invalid\n");

            return m_p[m_size - 1];
        }

        inline writable_span& operator= (const writable_span& rhs)
        {
            m_p = rhs.m_p;
            m_size = rhs.m_size;
            return *this;
        }

        inline writable_span& operator= (writable_span&& rhs)
        {
            if (this != &rhs)
            {
                m_p = rhs.m_p;
                m_size = rhs.m_size;
                rhs.m_p = nullptr;
                rhs.m_size = 0;
            }

            return *this;
        }

        inline T& operator* () const
        {
            if (!(m_p && m_size))
                container_abort("writable_span invalid\n");

            return *m_p;
        }

        inline T* operator-> () const
        {
            if (!(m_p && m_size))
                container_abort("writable_span invalid\n");

            return m_p;
        }

        inline bool set_all(size_t ofs, size_t size, const_reference val)
        {
            if (!size)
                return true;

            if (!is_inside(ofs, size))
            {
                assert(0);
                return false;
            }

            T* pDst = m_p + ofs;

            if ((sizeof(T) == sizeof(uint8_t)) && (BASISU_IS_BITWISE_COPYABLE(T)))
            {
                memset(pDst, (int)((uint8_t)val), size);
            }
            else
            {
                T* pDst_end = pDst + size;

                while (pDst != pDst_end)
                    *pDst++ = val;
            }

            return true;
        }

        inline bool set_all(const_reference val)
        {
            return set_all(0, m_size, val);
        }

        inline writable_span& remove_prefix(size_t n)
        {
            if ((!m_p) || (n > m_size))
            {
                assert(0);
                return *this;
            }

            m_p += n;
            m_size -= n;
            return *this;
        }

        inline writable_span& remove_suffix(size_t n)
        {
            if ((!m_p) || (n > m_size))
            {
                assert(0);
                return *this;
            }

            m_size -= n;
            return *this;
        }

        inline writable_span& enlarge(size_t n)
        {
            if (!m_p)
            {
                assert(0);
                return *this;
            }

            if (add_overflow_check(m_size, n))
            {
                assert(0);
                return *this;
            }

            m_size += n;
            return *this;
        }

        // copy from this span to the destination ptr
        bool copy_from(size_t src_ofs, size_t src_size, T* pDst, size_t dst_ofs) const
        {
            if (!src_size)
                return true;

            if (!pDst)
            {
                assert(0);
                return false;
            }

            if (!is_inside(src_ofs, src_size))
            {
                assert(0);
                return false;
            }

            const_pointer pS = m_p + src_ofs;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                const uint64_t num_bytes = (uint64_t)src_size * sizeof(T);

                if (!can_fit_into_size_t(num_bytes))
                {
                    assert(0);
                    return false;
                }

                memcpy(pDst + dst_ofs, pS, (size_t)num_bytes);
            }
            else
            {
                T* pD = pDst + dst_ofs;
                T* pDst_end = pD + src_size;

                while (pD != pDst_end)
                    *pD++ = *pS++;
            }

            return true;
        }

        // copy from the source ptr into this span
        bool copy_into(const_pointer pSrc, size_t src_ofs, size_t src_size, size_t dst_ofs) const
        {
            if (!src_size)
                return true;

            if (!pSrc)
            {
                assert(0);
                return false;
            }

            if (add_overflow_check(src_ofs, src_size) || add_overflow_check(dst_ofs, src_size))
            {
                assert(0);
                return false;
            }

            if (!is_valid())
            {
                assert(0);
                return false;
            }

            if (!is_inside(dst_ofs, src_size))
            {
                assert(0);
                return false;
            }

            const_pointer pS = pSrc + src_ofs;
            T* pD = m_p + dst_ofs;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                const uint64_t num_bytes = (uint64_t)src_size * sizeof(T);

                if (!can_fit_into_size_t(num_bytes))
                {
                    assert(0);
                    return false;
                }

                memcpy(pD, pS, (size_t)num_bytes);
            }
            else
            {
                T* pDst_end = pD + src_size;

                while (pD != pDst_end)
                    *pD++ = *pS++;
            }

            return true;
        }

        // copy from a source span into this span
        bool copy_into(const readable_span<T>& src, size_t src_ofs, size_t src_size, size_t dst_ofs) const
        {
            if (!src.is_inside(src_ofs, src_size))
            {
                assert(0);
                return false;
            }

            return copy_into(src.get_ptr(), src_ofs, src_size, dst_ofs);
        }

        // copy from a source span into this span
        bool copy_into(const writable_span& src, size_t src_ofs, size_t src_size, size_t dst_ofs) const
        {
            if (!src.is_inside(src_ofs, src_size))
            {
                assert(0);
                return false;
            }

            return copy_into(src.get_ptr(), src_ofs, src_size, dst_ofs);
        }

        inline T& operator[] (size_t idx) const
        {
            if ((!is_valid()) || (idx >= m_size))
                container_abort("writable_span: invalid span or index\n");

            return m_p[idx];
        }

        template<typename R>
        inline R read_val(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(R)))
            {
                assert(0);
                return (R)0;
            }

            return *reinterpret_cast<const R*>(&m_p[ofs]);
        }

        template<typename R>
        inline bool write_val(size_t ofs, R val) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(R)))
            {
                assert(0);
                return false;
            }

            *reinterpret_cast<R*>(&m_p[ofs]) = val;
            return true;
        }

        inline bool write_le16(size_t ofs, uint16_t val) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint16_t)))
            {
                assert(0);
                return false;
            }

            m_p[ofs] = (uint8_t)val;
            m_p[ofs + 1] = (uint8_t)(val >> 8u);
            return true;
        }

        inline bool write_be16(size_t ofs, uint16_t val) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint16_t)))
            {
                assert(0);
                return false;
            }

            m_p[ofs + 1] = (uint8_t)val;
            m_p[ofs] = (uint8_t)(val >> 8u);
            return true;
        }

        inline bool write_le32(size_t ofs, uint32_t val) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint32_t)))
            {
                assert(0);
                return false;
            }

            m_p[ofs] = (uint8_t)val;
            m_p[ofs + 1] = (uint8_t)(val >> 8u);
            m_p[ofs + 2] = (uint8_t)(val >> 16u);
            m_p[ofs + 3] = (uint8_t)(val >> 24u);
            return true;
        }

        inline bool write_be32(size_t ofs, uint32_t val) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint32_t)))
            {
                assert(0);
                return false;
            }

            m_p[ofs + 3] = (uint8_t)val;
            m_p[ofs + 2] = (uint8_t)(val >> 8u);
            m_p[ofs + 1] = (uint8_t)(val >> 16u);
            m_p[ofs] = (uint8_t)(val >> 24u);
            return true;
        }

        inline bool write_le64(size_t ofs, uint64_t val) const
        {
            if (add_overflow_check(ofs, sizeof(uint64_t)))
            {
                assert(0);
                return false;
            }

            return write_le32(ofs, (uint32_t)val) && write_le32(ofs + sizeof(uint32_t), (uint32_t)(val >> 32u));
        }

        inline bool write_be64(size_t ofs, uint64_t val) const
        {
            if (add_overflow_check(ofs, sizeof(uint64_t)))
            {
                assert(0);
                return false;
            }

            return write_be32(ofs + sizeof(uint32_t), (uint32_t)val) && write_be32(ofs, (uint32_t)(val >> 32u));
        }

        inline uint16_t read_le16(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint16_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t a = (uint8_t)m_p[ofs];
            const uint8_t b = (uint8_t)m_p[ofs + 1];
            return a | (b << 8u);
        }

        inline uint16_t read_be16(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint16_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t b = (uint8_t)m_p[ofs];
            const uint8_t a = (uint8_t)m_p[ofs + 1];
            return a | (b << 8u);
        }

        inline uint32_t read_le32(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint32_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t a = (uint8_t)m_p[ofs];
            const uint8_t b = (uint8_t)m_p[ofs + 1];
            const uint8_t c = (uint8_t)m_p[ofs + 2];
            const uint8_t d = (uint8_t)m_p[ofs + 3];
            return a | (b << 8u) | (c << 16u) | (d << 24u);
        }

        inline uint32_t read_be32(size_t ofs) const
        {
            static_assert(sizeof(T) == 1, "T must be byte size");

            if (!is_inside(ofs, sizeof(uint32_t)))
            {
                assert(0);
                return 0;
            }

            const uint8_t d = (uint8_t)m_p[ofs];
            const uint8_t c = (uint8_t)m_p[ofs + 1];
            const uint8_t b = (uint8_t)m_p[ofs + 2];
            const uint8_t a = (uint8_t)m_p[ofs + 3];
            return a | (b << 8u) | (c << 16u) | (d << 24u);
        }

        inline uint64_t read_le64(size_t ofs) const
        {
            if (add_overflow_check(ofs, sizeof(uint64_t)))
            {
                assert(0);
                return 0;
            }
            const uint64_t l = read_le32(ofs);
            const uint64_t h = read_le32(ofs + sizeof(uint32_t));
            return l | (h << 32u);
        }

        inline uint64_t read_be64(size_t ofs) const
        {
            if (add_overflow_check(ofs, sizeof(uint64_t)))
            {
                assert(0);
                return 0;
            }
            const uint64_t h = read_be32(ofs);
            const uint64_t l = read_be32(ofs + sizeof(uint32_t));
            return l | (h << 32u);
        }

    private:
        T* m_p;
        size_t m_size;
    };

    template<typename T>
    inline readable_span<T>::readable_span(const writable_span<T>& other) :
        m_p(other.m_p),
        m_size(other.m_size)
    {
    }

    template<typename T>
    inline readable_span<T>& readable_span<T>::operator= (const writable_span<T>& rhs)
    {
        m_p = rhs.m_p;
        m_size = rhs.m_size;
        return *this;
    }

    template<typename T>
    inline bool span_copy(const writable_span<T>& dst, const readable_span<T>& src)
    {
        return dst.copy_into(src, 0, src.size(), 0);
    }

    template<typename T>
    inline bool span_copy(const writable_span<T>& dst, const writable_span<T>& src)
    {
        return dst.copy_into(src, 0, src.size(), 0);
    }

    template<typename T>
    inline bool span_copy(const writable_span<T>& dst, size_t dst_ofs, const writable_span<T>& src, size_t src_ofs, size_t len)
    {
        return dst.copy_into(src, src_ofs, len, dst_ofs);
    }

    template<typename T>
    inline bool span_copy(const writable_span<T>& dst, size_t dst_ofs, const readable_span<T>& src, size_t src_ofs, size_t len)
    {
        return dst.copy_into(src, src_ofs, len, dst_ofs);
    }

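    // Illustrative sketch (not part of the upstream header): serializing into a stack
    // buffer through writable_span, then duplicating it with span_copy(). All offsets
    // are validated, so a failed write returns false rather than corrupting adjacent
    // memory. Buffer names are made up. Deliberately guarded out of the build.
#if 0
    static void writable_span_example()
    {
        uint8_t msg[8] = {};
        basisu::writable_span<uint8_t> out(msg, sizeof(msg));

        bool ok = out.write_le16(0, 0x5542);   // magic
        ok = ok && out.write_le16(2, 1);       // version
        ok = ok && out.write_le32(4, 16);      // payload length
        assert(ok);

        // This would land past the end of the 8-byte buffer: returns false (and asserts in debug).
        const bool overran = out.write_le32(6, 0xDEADBEEFu);
        (void)overran;

        uint8_t copy_buf[8] = {};
        basisu::writable_span<uint8_t> dst(copy_buf, sizeof(copy_buf));
        assert(basisu::span_copy(dst, out.get_readable_span()));
    }
#endif
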
template<typename T>
1457
class vector : public rel_ops< vector<T> >
1458
{
1459
public:
1460
typedef T* iterator;
1461
typedef const T* const_iterator;
1462
typedef T value_type;
1463
typedef T& reference;
1464
typedef const T& const_reference;
1465
typedef T* pointer;
1466
typedef const T* const_pointer;
1467
1468
inline vector() :
1469
m_p(nullptr),
1470
m_size(0),
1471
m_capacity(0)
1472
{
1473
}
1474
1475
inline vector(size_t n, const T& init) :
1476
m_p(nullptr),
1477
m_size(0),
1478
m_capacity(0)
1479
{
1480
increase_capacity(n, false);
1481
construct_array(m_p, n, init);
1482
m_size = n;
1483
}
1484
1485
inline vector(vector&& other) :
1486
m_p(other.m_p),
1487
m_size(other.m_size),
1488
m_capacity(other.m_capacity)
1489
{
1490
other.m_p = nullptr;
1491
other.m_size = 0;
1492
other.m_capacity = 0;
1493
}
1494
1495
inline vector(const vector& other) :
1496
m_p(nullptr),
1497
m_size(0),
1498
m_capacity(0)
1499
{
1500
increase_capacity(other.m_size, false);
1501
1502
m_size = other.m_size;
1503
1504
if (BASISU_IS_BITWISE_COPYABLE(T))
1505
{
1506
1507
#ifndef __EMSCRIPTEN__
1508
#ifdef __GNUC__
1509
#pragma GCC diagnostic push
1510
#pragma GCC diagnostic ignored "-Wclass-memaccess"
1511
#endif
1512
#endif
1513
if ((m_p) && (other.m_p))
1514
{
1515
memcpy(m_p, other.m_p, m_size * sizeof(T));
1516
}
1517
#ifndef __EMSCRIPTEN__
1518
#ifdef __GNUC__
1519
#pragma GCC diagnostic pop
1520
#endif
1521
#endif
1522
}
1523
else
1524
{
1525
T* pDst = m_p;
1526
const T* pSrc = other.m_p;
1527
for (size_t i = m_size; i > 0; i--)
1528
construct(pDst++, *pSrc++);
1529
}
1530
}
1531
1532
inline explicit vector(size_t size) :
1533
m_p(nullptr),
1534
m_size(0),
1535
m_capacity(0)
1536
{
1537
resize(size);
1538
}
1539
1540
inline explicit vector(std::initializer_list<T> init_list) :
1541
m_p(nullptr),
1542
m_size(0),
1543
m_capacity(0)
1544
{
1545
resize(init_list.size());
1546
1547
size_t idx = 0;
1548
for (const T& elem : init_list)
1549
m_p[idx++] = elem;
1550
1551
assert(idx == m_size);
1552
}
1553
1554
inline vector(const readable_span<T>& rs) :
1555
m_p(nullptr),
1556
m_size(0),
1557
m_capacity(0)
1558
{
1559
set(rs);
1560
}
1561
1562
inline vector(const writable_span<T>& ws) :
1563
m_p(nullptr),
1564
m_size(0),
1565
m_capacity(0)
1566
{
1567
set(ws);
1568
}
1569
1570
// Set contents of vector to contents of the readable span
1571
bool set(const readable_span<T>& rs)
1572
{
1573
if (!rs.is_valid())
1574
{
1575
assert(0);
1576
return false;
1577
}
1578
1579
const size_t new_size = rs.size();
1580
1581
// Could call resize(), but it'll redundantly construct trivial types.
1582
if (m_size != new_size)
1583
{
1584
if (new_size < m_size)
1585
{
1586
if (BASISU_HAS_DESTRUCTOR(T))
1587
{
1588
scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
1589
}
1590
}
1591
else
1592
{
1593
if (new_size > m_capacity)
1594
{
1595
if (!increase_capacity(new_size, false, true))
1596
return false;
1597
}
1598
}
1599
1600
// Don't bother constructing trivial types, because we're going to memcpy() over them anyway.
1601
if (!BASISU_IS_BITWISE_COPYABLE(T))
1602
{
1603
scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
1604
}
1605
1606
m_size = new_size;
1607
}
1608
1609
if (!rs.copy_from(0, rs.size(), m_p, 0))
1610
{
1611
assert(0);
1612
return false;
1613
}
1614
1615
return true;
1616
}
1617
1618
// Set contents of vector to contents of the writable span
1619
inline bool set(const writable_span<T>& ws)
1620
{
1621
return set(ws.get_readable_span());
1622
}
1623
1624
inline ~vector()
1625
{
1626
if (m_p)
1627
{
1628
if (BASISU_HAS_DESTRUCTOR(T))
1629
{
1630
scalar_type<T>::destruct_array(m_p, m_size);
1631
}
1632
1633
free(m_p);
1634
}
1635
}
1636
1637
inline vector& operator= (const vector& other)
1638
{
1639
if (this == &other)
1640
return *this;
1641
1642
if (m_capacity >= other.m_size)
1643
resize(0);
1644
else
1645
{
1646
clear();
1647
increase_capacity(other.m_size, false);
1648
}
1649
1650
if (BASISU_IS_BITWISE_COPYABLE(T))
1651
{
1652
#ifndef __EMSCRIPTEN__
1653
#ifdef __GNUC__
1654
#pragma GCC diagnostic push
1655
#pragma GCC diagnostic ignored "-Wclass-memaccess"
1656
#endif
1657
#endif
1658
if ((m_p) && (other.m_p))
1659
memcpy(m_p, other.m_p, other.m_size * sizeof(T));
1660
#ifndef __EMSCRIPTEN__
1661
#ifdef __GNUC__
1662
#pragma GCC diagnostic pop
1663
#endif
1664
#endif
1665
}
1666
else
1667
{
1668
T* pDst = m_p;
1669
const T* pSrc = other.m_p;
1670
for (size_t i = other.m_size; i > 0; i--)
1671
construct(pDst++, *pSrc++);
1672
}
1673
1674
m_size = other.m_size;
1675
1676
return *this;
1677
}
1678
1679
inline vector& operator= (vector&& rhs)
1680
{
1681
if (this != &rhs)
1682
{
1683
clear();
1684
1685
m_p = rhs.m_p;
1686
m_size = rhs.m_size;
1687
m_capacity = rhs.m_capacity;
1688
1689
rhs.m_p = nullptr;
1690
rhs.m_size = 0;
1691
rhs.m_capacity = 0;
1692
}
1693
return *this;
1694
}
1695
1696
BASISU_FORCE_INLINE const T* begin() const { return m_p; }
1697
BASISU_FORCE_INLINE T* begin() { return m_p; }
1698
1699
BASISU_FORCE_INLINE const T* end() const { return m_p + m_size; }
1700
BASISU_FORCE_INLINE T* end() { return m_p + m_size; }
1701
1702
BASISU_FORCE_INLINE bool empty() const { return !m_size; }
1703
1704
BASISU_FORCE_INLINE size_t size() const { return m_size; }
1705
BASISU_FORCE_INLINE uint32_t size_u32() const { assert(m_size <= UINT32_MAX); return static_cast<uint32_t>(m_size); }
1706
1707
BASISU_FORCE_INLINE size_t size_in_bytes() const { return m_size * sizeof(T); }
1708
BASISU_FORCE_INLINE uint32_t size_in_bytes_u32() const { assert((m_size * sizeof(T)) <= UINT32_MAX); return static_cast<uint32_t>(m_size * sizeof(T)); }
1709
1710
BASISU_FORCE_INLINE size_t capacity() const { return m_capacity; }
1711
1712
#if !BASISU_VECTOR_FORCE_CHECKING
1713
BASISU_FORCE_INLINE const T& operator[] (size_t i) const { assert(i < m_size); return m_p[i]; }
1714
BASISU_FORCE_INLINE T& operator[] (size_t i) { assert(i < m_size); return m_p[i]; }
1715
#else
1716
BASISU_FORCE_INLINE const T& operator[] (size_t i) const
1717
{
1718
if (i >= m_size)
1719
container_abort("vector::operator[] invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1720
1721
return m_p[i];
1722
}
1723
BASISU_FORCE_INLINE T& operator[] (size_t i)
1724
{
1725
if (i >= m_size)
1726
container_abort("vector::operator[] invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1727
1728
return m_p[i];
1729
}
1730
#endif
1731
1732
// at() always includes range checking, even in final builds, unlike operator [].
1733
BASISU_FORCE_INLINE const T& at(size_t i) const
1734
{
1735
if (i >= m_size)
1736
container_abort("vector::at() invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1737
1738
return m_p[i];
1739
}
1740
BASISU_FORCE_INLINE T& at(size_t i)
1741
{
1742
if (i >= m_size)
1743
container_abort("vector::at() invalid index: %zu, max entries %u, type size %zu\n", i, m_size, sizeof(T));
1744
1745
return m_p[i];
1746
}
1747
1748
#if !BASISU_VECTOR_FORCE_CHECKING
1749
BASISU_FORCE_INLINE const T& front() const { assert(m_size); return m_p[0]; }
1750
BASISU_FORCE_INLINE T& front() { assert(m_size); return m_p[0]; }
1751
1752
BASISU_FORCE_INLINE const T& back() const { assert(m_size); return m_p[m_size - 1]; }
1753
BASISU_FORCE_INLINE T& back() { assert(m_size); return m_p[m_size - 1]; }
1754
#else
1755
BASISU_FORCE_INLINE const T& front() const
1756
{
1757
if (!m_size)
1758
container_abort("front: vector is empty, type size %zu\n", sizeof(T));
1759
1760
return m_p[0];
1761
}
1762
BASISU_FORCE_INLINE T& front()
1763
{
1764
if (!m_size)
1765
container_abort("front: vector is empty, type size %zu\n", sizeof(T));
1766
1767
return m_p[0];
1768
}
1769
1770
BASISU_FORCE_INLINE const T& back() const
1771
{
1772
if (!m_size)
1773
container_abort("back: vector is empty, type size %zu\n", sizeof(T));
1774
1775
return m_p[m_size - 1];
1776
}
1777
BASISU_FORCE_INLINE T& back()
1778
{
1779
if (!m_size)
1780
container_abort("back: vector is empty, type size %zu\n", sizeof(T));
1781
1782
return m_p[m_size - 1];
1783
}
1784
#endif
1785
1786
BASISU_FORCE_INLINE const T* get_ptr() const { return m_p; }
1787
BASISU_FORCE_INLINE T* get_ptr() { return m_p; }
1788
1789
BASISU_FORCE_INLINE const T* data() const { return m_p; }
1790
BASISU_FORCE_INLINE T* data() { return m_p; }
1791
1792
// clear() sets the container to empty, then frees the allocated block.
1793
inline void clear()
1794
{
1795
if (m_p)
1796
{
1797
if (BASISU_HAS_DESTRUCTOR(T))
1798
{
1799
scalar_type<T>::destruct_array(m_p, m_size);
1800
}
1801
1802
free(m_p);
1803
1804
m_p = nullptr;
1805
m_size = 0;
1806
m_capacity = 0;
1807
}
1808
}
1809
1810
inline void clear_no_destruction()
1811
{
1812
if (m_p)
1813
{
1814
free(m_p);
1815
m_p = nullptr;
1816
m_size = 0;
1817
m_capacity = 0;
1818
}
1819
}
1820
1821
inline void reserve(size_t new_capacity)
1822
{
1823
if (!try_reserve(new_capacity))
1824
container_abort("vector:reserve: try_reserve failed!\n");
1825
}
1826
1827
inline bool try_reserve(size_t new_capacity)
1828
{
1829
if (new_capacity > m_capacity)
1830
{
1831
if (!increase_capacity(new_capacity, false, true))
1832
return false;
1833
}
1834
else if (new_capacity < m_capacity)
1835
{
1836
// Must work around the lack of a "decrease_capacity()" method.
1837
// This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
1838
vector tmp;
1839
if (!tmp.increase_capacity(helpers::maximum(m_size, new_capacity), false, true))
1840
return false;
1841
1842
tmp = *this;
1843
swap(tmp);
1844
}
1845
1846
return true;
1847
}
1848
1849
// try_resize(0) sets the container to empty, but does not free the allocated block.
1850
inline bool try_resize(size_t new_size, bool grow_hint = false)
1851
{
1852
if (m_size != new_size)
1853
{
1854
if (new_size < m_size)
1855
{
1856
if (BASISU_HAS_DESTRUCTOR(T))
1857
{
1858
scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
1859
}
1860
}
1861
else
1862
{
1863
if (new_size > m_capacity)
1864
{
1865
if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
1866
return false;
1867
}
1868
1869
scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
1870
}
1871
1872
m_size = new_size;
1873
}
1874
1875
return true;
1876
}
1877
1878
// resize(0) sets the container to empty, but does not free the allocated block.
1879
inline void resize(size_t new_size, bool grow_hint = false)
1880
{
1881
if (!try_resize(new_size, grow_hint))
1882
container_abort("vector::resize failed, new size %zu\n", new_size);
1883
}
1884
1885
// If size >= capacity/2, reset() sets the container's size to 0 but doesn't free the allocated block (because the container may be similarly loaded in the future).
1886
// Otherwise it blows away the allocated block. See http://www.codercorner.com/blog/?p=494
1887
inline void reset()
1888
{
1889
if (m_size >= (m_capacity >> 1))
1890
resize(0);
1891
else
1892
clear();
1893
}
1894
1895
inline T* try_enlarge(size_t i)
1896
{
1897
size_t cur_size = m_size;
1898
1899
if (add_overflow_check(cur_size, i))
1900
return nullptr;
1901
1902
if (!try_resize(cur_size + i, true))
1903
return nullptr;
1904
1905
return get_ptr() + cur_size;
1906
}
1907
1908
inline T* enlarge(size_t i)
1909
{
1910
T* p = try_enlarge(i);
1911
if (!p)
1912
container_abort("vector::enlarge failed, amount %zu!\n", i);
1913
return p;
1914
}
1915
1916
BASISU_FORCE_INLINE void push_back(const T& obj)
1917
{
1918
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1919
1920
if (m_size >= m_capacity)
1921
{
1922
if (add_overflow_check(m_size, 1))
1923
container_abort("vector::push_back: vector too large\n");
1924
1925
increase_capacity(m_size + 1, true);
1926
}
1927
1928
scalar_type<T>::construct(m_p + m_size, obj);
1929
m_size++;
1930
}
1931
1932
BASISU_FORCE_INLINE void push_back_value(T&& obj)
1933
{
1934
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1935
1936
if (m_size >= m_capacity)
1937
{
1938
if (add_overflow_check(m_size, 1))
1939
container_abort("vector::push_back_value: vector too large\n");
1940
1941
increase_capacity(m_size + 1, true);
1942
}
1943
1944
new ((void*)(m_p + m_size)) T(std::move(obj));
1945
m_size++;
1946
}
1947
1948
inline bool try_push_back(const T& obj)
1949
{
1950
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1951
1952
if (m_size >= m_capacity)
1953
{
1954
if (add_overflow_check(m_size, 1))
1955
return false;
1956
1957
if (!increase_capacity(m_size + 1, true, true))
1958
return false;
1959
}
1960
1961
scalar_type<T>::construct(m_p + m_size, obj);
1962
m_size++;
1963
1964
return true;
1965
}
1966
1967
inline bool try_push_back(T&& obj)
1968
{
1969
assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));
1970
1971
if (m_size >= m_capacity)
1972
{
1973
if (add_overflow_check(m_size, 1))
1974
return false;
1975
1976
if (!increase_capacity(m_size + 1, true, true))
1977
return false;
1978
}
1979
1980
new ((void*)(m_p + m_size)) T(std::move(obj));
1981
m_size++;
1982
1983
return true;
1984
}
1985
1986
// obj is explictly passed in by value, not ref
1987
inline void push_back_value(T obj)
1988
{
1989
if (m_size >= m_capacity)
1990
{
1991
if (add_overflow_check(m_size, 1))
1992
container_abort("vector::push_back_value: vector too large\n");
1993
1994
increase_capacity(m_size + 1, true);
1995
}
1996
1997
scalar_type<T>::construct(m_p + m_size, obj);
1998
m_size++;
1999
}
2000
2001
// obj is explictly passed in by value, not ref
2002
inline bool try_push_back_value(T obj)
2003
{
2004
if (m_size >= m_capacity)
2005
{
2006
if (add_overflow_check(m_size, 1))
2007
return false;
2008
2009
if (!increase_capacity(m_size + 1, true, true))
2010
return false;
2011
}
2012
2013
scalar_type<T>::construct(m_p + m_size, obj);
2014
m_size++;
2015
2016
return true;
2017
}
2018
2019
template<typename... Args>
2020
BASISU_FORCE_INLINE void emplace_back(Args&&... args)
2021
{
2022
if (m_size >= m_capacity)
2023
{
2024
if (add_overflow_check(m_size, 1))
2025
container_abort("vector::enlarge: vector too large\n");
2026
2027
increase_capacity(m_size + 1, true);
2028
}
2029
2030
new ((void*)(m_p + m_size)) T(std::forward<Args>(args)...); // perfect forwarding
2031
m_size++;
2032
}
2033
2034
template<typename... Args>
2035
BASISU_FORCE_INLINE bool try_emplace_back(Args&&... args)
2036
{
2037
if (m_size >= m_capacity)
2038
{
2039
if (add_overflow_check(m_size, 1))
2040
return false;
2041
2042
if (!increase_capacity(m_size + 1, true, true))
2043
return false;
2044
}
2045
2046
new ((void*)(m_p + m_size)) T(std::forward<Args>(args)...); // perfect forwarding
2047
m_size++;
2048
2049
return true;
2050
}
2051
2052
inline void pop_back()
2053
{
2054
assert(m_size);
2055
2056
if (m_size)
2057
{
2058
m_size--;
2059
scalar_type<T>::destruct(&m_p[m_size]);
2060
}
2061
}
2062
2063
inline bool try_insert(size_t index, const T* p, size_t n)
2064
{
2065
assert(index <= m_size);
2066
2067
if (index > m_size)
2068
return false;
2069
2070
if (!n)
2071
return true;
2072
2073
const size_t orig_size = m_size;
2074
2075
if (add_overflow_check(m_size, n))
2076
return false;
2077
2078
if (!try_resize(m_size + n, true))
2079
return false;
2080
2081
const size_t num_to_move = orig_size - index;
2082
2083
if (BASISU_IS_BITWISE_COPYABLE(T))
2084
{
2085
// This overwrites the destination object bits, but bitwise copyable means we don't need to worry about destruction.
2086
memmove(m_p + index + n, m_p + index, sizeof(T) * num_to_move);
2087
}
2088
else
2089
{
2090
const T* pSrc = m_p + orig_size - 1;
2091
T* pDst = const_cast<T*>(pSrc) + n;
2092
2093
for (size_t i = 0; i < num_to_move; i++)
2094
{
2095
assert((uint64_t)(pDst - m_p) < (uint64_t)m_size);
2096
2097
*pDst = std::move(*pSrc);
2098
pDst--;
2099
pSrc--;
2100
}
2101
}
2102
2103
T* pDst = m_p + index;
2104
2105
if (BASISU_IS_BITWISE_COPYABLE(T))
2106
{
2107
// This copies in the new bits, overwriting the existing objects, which is OK for copyable types that don't need destruction.
2108
memcpy(pDst, p, sizeof(T) * n);
2109
}
2110
else
2111
{
2112
for (size_t i = 0; i < n; i++)
2113
{
2114
assert((uint64_t)(pDst - m_p) < (uint64_t)m_size);
2115
*pDst++ = *p++;
2116
}
2117
}
2118
2119
return true;
2120
}
2121
2122
inline void insert(size_t index, const T* p, size_t n)
2123
{
2124
if (!try_insert(index, p, n))
2125
container_abort("vector::insert() failed!\n");
2126
}
2127
2128
inline bool try_insert(T* p, const T& obj)
2129
{
2130
if (p < begin())
2131
{
2132
assert(0);
2133
return false;
2134
}
2135
2136
uint64_t ofs = p - begin();
2137
2138
if (ofs > m_size)
2139
{
2140
assert(0);
2141
return false;
2142
}
2143
2144
if ((size_t)ofs != ofs)
2145
{
2146
assert(0);
2147
return false;
2148
}
2149
2150
return try_insert((size_t)ofs, &obj, 1);
2151
}
2152
2153
inline void insert(T* p, const T& obj)
2154
{
2155
if (!try_insert(p, obj))
2156
container_abort("vector::insert() failed!\n");
2157
}
2158
2159
// push_front() isn't going to be very fast - it's only here for usability.
2160
inline void push_front(const T& obj)
2161
{
2162
insert(0, &obj, 1);
2163
}
2164
2165
inline bool try_push_front(const T& obj)
2166
{
2167
return try_insert(0, &obj, 1);
2168
}
2169
2170
vector& append(const vector& other)
2171
{
2172
if (other.m_size)
2173
insert(m_size, &other[0], other.m_size);
2174
return *this;
2175
}
2176
2177
bool try_append(const vector& other)
2178
{
2179
if (other.m_size)
2180
return try_insert(m_size, &other[0], other.m_size);
2181
2182
return true;
2183
}
2184
2185
vector& append(const T* p, size_t n)
2186
{
2187
if (n)
2188
insert(m_size, p, n);
2189
return *this;
2190
}
2191
2192
bool try_append(const T* p, size_t n)
2193
{
2194
if (n)
2195
return try_insert(m_size, p, n);
2196
2197
return true;
2198
}
2199
2200
inline bool erase(size_t start, size_t n)
2201
{
2202
if (add_overflow_check(start, n))
2203
{
2204
assert(0);
2205
return false;
2206
}
2207
2208
assert((start + n) <= m_size);
2209
2210
if ((start + n) > m_size)
2211
{
2212
assert(0);
2213
return false;
2214
}
2215
2216
if (!n)
2217
return true;
2218
2219
const size_t num_to_move = m_size - (start + n);
2220
2221
T* pDst = m_p + start;
2222
2223
const T* pSrc = m_p + start + n;
2224
2225
if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T))
2226
{
2227
// This test is overly cautious.
2228
if ((!BASISU_IS_BITWISE_COPYABLE(T)) || (BASISU_HAS_DESTRUCTOR(T)))
2229
{
2230
// Type has been marked explictly as bitwise movable, which means we can move them around but they may need to be destructed.
2231
// First destroy the erased objects.
2232
scalar_type<T>::destruct_array(pDst, n);
2233
}
2234
2235
// Copy "down" the objects to preserve, filling in the empty slots.
2236
2237
#ifndef __EMSCRIPTEN__
2238
#ifdef __GNUC__
2239
#pragma GCC diagnostic push
2240
#pragma GCC diagnostic ignored "-Wclass-memaccess"
2241
#endif
2242
#endif
2243
2244
memmove(pDst, pSrc, num_to_move * sizeof(T));
2245
2246
#ifndef __EMSCRIPTEN__
2247
#ifdef __GNUC__
2248
#pragma GCC diagnostic pop
2249
#endif
2250
#endif
2251
}
2252
else
2253
{
2254
// Type is not bitwise copyable or movable.
2255
// Move them down one at a time by using the equals operator, and destroying anything that's left over at the end.
2256
T* pDst_end = pDst + num_to_move;
2257
2258
while (pDst != pDst_end)
2259
{
2260
*pDst = std::move(*pSrc);
2261
2262
++pDst;
2263
++pSrc;
2264
}
2265
2266
scalar_type<T>::destruct_array(pDst_end, n);
2267
}
2268
2269
m_size -= n;
2270
2271
return true;
2272
}
2273
2274
inline bool erase_index(size_t index)
2275
{
2276
return erase(index, 1);
2277
}
2278
2279
inline bool erase(T* p)
2280
{
2281
assert((p >= m_p) && (p < (m_p + m_size)));
2282
2283
if (p < m_p)
2284
return false;
2285
2286
return erase_index(static_cast<size_t>(p - m_p));
2287
}
2288
2289
inline bool erase(T* pFirst, T* pEnd)
2290
{
2291
assert(pFirst <= pEnd);
2292
assert(pFirst >= begin() && pFirst <= end());
2293
assert(pEnd >= begin() && pEnd <= end());
2294
2295
if ((pFirst < begin()) || (pEnd < pFirst))
2296
{
2297
assert(0);
2298
return false;
2299
}
2300
2301
uint64_t ofs = pFirst - begin();
2302
if ((size_t)ofs != ofs)
2303
{
2304
assert(0);
2305
return false;
2306
}
2307
2308
uint64_t n = pEnd - pFirst;
2309
if ((size_t)n != n)
2310
{
2311
assert(0);
2312
return false;
2313
}
2314
2315
return erase((size_t)ofs, (size_t)n);
2316
}
2317
2318
bool erase_unordered(size_t index)
2319
{
2320
if (index >= m_size)
2321
{
2322
assert(0);
2323
return false;
2324
}
2325
2326
if ((index + 1) < m_size)
2327
{
2328
(*this)[index] = std::move(back());
2329
}
2330
2331
pop_back();
2332
return true;
2333
}
2334
2335
inline bool operator== (const vector& rhs) const
2336
{
2337
if (m_size != rhs.m_size)
2338
return false;
2339
else if (m_size)
2340
{
2341
if (scalar_type<T>::cFlag)
2342
return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
2343
else
2344
{
2345
const T* pSrc = m_p;
2346
const T* pDst = rhs.m_p;
2347
for (size_t i = m_size; i; i--)
2348
if (!(*pSrc++ == *pDst++))
2349
return false;
2350
}
2351
}
2352
2353
return true;
2354
}
2355
2356
inline bool operator< (const vector& rhs) const
2357
{
2358
const size_t min_size = helpers::minimum(m_size, rhs.m_size);
2359
2360
const T* pSrc = m_p;
2361
const T* pSrc_end = m_p + min_size;
2362
const T* pDst = rhs.m_p;
2363
2364
while ((pSrc < pSrc_end) && (*pSrc == *pDst))
2365
{
2366
pSrc++;
2367
pDst++;
2368
}
2369
2370
if (pSrc < pSrc_end)
2371
return *pSrc < *pDst;
2372
2373
return m_size < rhs.m_size;
2374
}
2375
2376
inline void swap(vector& other)
2377
{
2378
std::swap(m_p, other.m_p);
2379
std::swap(m_size, other.m_size);
2380
std::swap(m_capacity, other.m_capacity);
2381
}
2382
2383
inline void sort()
2384
{
2385
std::sort(begin(), end());
2386
}
2387
2388
inline void unique()
2389
{
2390
if (!empty())
2391
{
2392
sort();
2393
2394
resize(std::unique(begin(), end()) - begin());
2395
}
2396
}
2397
2398
inline void reverse()
2399
{
2400
const size_t j = m_size >> 1;
2401
2402
for (size_t i = 0; i < j; i++)
2403
std::swap(m_p[i], m_p[m_size - 1 - i]);
2404
}
2405
2406
inline bool find(const T& key, size_t &idx) const
2407
{
2408
idx = 0;
2409
2410
const T* p = m_p;
2411
const T* p_end = m_p + m_size;
2412
2413
size_t index = 0;
2414
2415
while (p != p_end)
2416
{
2417
if (key == *p)
2418
{
2419
idx = index;
2420
return true;
2421
}
2422
2423
p++;
2424
index++;
2425
}
2426
2427
return false;
2428
}
2429
2430
inline bool find_sorted(const T& key, size_t& idx) const
2431
{
2432
idx = 0;
2433
2434
if (!m_size)
2435
return false;
2436
2437
// Inclusive range
2438
size_t low = 0, high = m_size - 1;
2439
2440
while (low <= high)
2441
{
2442
size_t mid = (size_t)(((uint64_t)low + (uint64_t)high) >> 1);
2443
2444
const T* pTrial_key = m_p + mid;
2445
2446
// Sanity check comparison operator
2447
assert(!((*pTrial_key < key) && (key < *pTrial_key)));
2448
2449
if (*pTrial_key < key)
2450
{
2451
if (add_overflow_check(mid, 1))
2452
break;
2453
2454
low = mid + 1;
2455
}
2456
else if (key < *pTrial_key)
2457
{
2458
if (!mid)
2459
break;
2460
2461
high = mid - 1;
2462
}
2463
else
2464
{
2465
idx = mid;
2466
return true;
2467
}
2468
}
2469
2470
return false;
2471
}
2472
2473
inline size_t count_occurences(const T& key) const
{
size_t c = 0;

const T* p = m_p;
const T* p_end = m_p + m_size;

while (p != p_end)
{
if (key == *p)
c++;

p++;
}

return c;
}

inline void set_all(const T& o)
{
if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
{
#ifndef __EMSCRIPTEN__
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
#endif
memset(m_p, *reinterpret_cast<const uint8_t*>(&o), m_size);

#ifndef __EMSCRIPTEN__
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#endif
}
else
{
T* pDst = m_p;
T* pDst_end = pDst + m_size;
while (pDst != pDst_end)
*pDst++ = o;
}
}

// Caller assumes ownership of the heap block associated with the container. Container is cleared.
// Caller must use free() on the returned pointer.
inline void* assume_ownership()
{
T* p = m_p;
m_p = nullptr;
m_size = 0;
m_capacity = 0;
return p;
}

// Caller is granting ownership of the indicated heap block.
// Block must have size constructed elements, and have enough room for capacity elements.
// The block must have been allocated using malloc().
// Important: This method is used in Basis Universal. If you change how this container allocates memory, you'll need to change any users of this method.
inline bool grant_ownership(T* p, size_t size, size_t capacity)
{
// To prevent the caller from obviously shooting themselves in the foot.
if (((p + capacity) > m_p) && (p < (m_p + m_capacity)))
{
// Can't grant ownership of a block that lies inside the container itself!
assert(0);
return false;
}

if (size > capacity)
{
assert(0);
return false;
}

if (!p)
{
if (capacity)
{
assert(0);
return false;
}
}
else if (!capacity)
{
assert(0);
return false;
}

clear();
m_p = p;
m_size = size;
m_capacity = capacity;
return true;
}

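// Illustrative sketch (editorial, not part of the library), assuming the
// default malloc()-based allocation these methods document:
//
//   basisu::vector<uint8_t> v;
//   v.resize(128);
//   void* pBlock = v.assume_ownership();   // v is now empty
//   // ... use pBlock ...
//   free(pBlock);                          // caller must free() it
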
readable_span<T> get_readable_span() const
{
return readable_span<T>(m_p, m_size);
}

writable_span<T> get_writable_span()
{
return writable_span<T>(m_p, m_size);
}

private:
T* m_p;
size_t m_size; // the number of constructed objects
size_t m_capacity; // the size of the allocation

template<typename Q> struct is_vector { enum { cFlag = false }; };
template<typename Q> struct is_vector< vector<Q> > { enum { cFlag = true }; };

static void object_mover(void* pDst_void, void* pSrc_void, size_t num)
{
T* pSrc = static_cast<T*>(pSrc_void);
T* const pSrc_end = pSrc + num;
T* pDst = static_cast<T*>(pDst_void);

while (pSrc != pSrc_end)
{
new ((void*)(pDst)) T(std::move(*pSrc));
scalar_type<T>::destruct(pSrc);

++pSrc;
++pDst;
}
}

inline bool increase_capacity(size_t min_new_capacity, bool grow_hint, bool nofail = false)
{
return reinterpret_cast<elemental_vector*>(this)->increase_capacity(
min_new_capacity, grow_hint, sizeof(T),
(BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) || (is_vector<T>::cFlag)) ? nullptr : object_mover, nofail);
}
};

template<typename T> struct bitwise_movable< vector<T> > { enum { cFlag = true }; };

// Hash map
// rg TODO 9/8/2024: I've upgraded this class to support 64-bit size_t, and it needs a lot more testing.

const uint32_t SIZE_T_BITS = sizeof(size_t) * 8U;

inline uint32_t safe_shift_left(uint32_t v, uint32_t l)
{
return (l < 32U) ? (v << l) : 0;
}

inline uint64_t safe_shift_left(uint64_t v, uint32_t l)
{
return (l < 64U) ? (v << l) : 0;
}

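// Editorial note: shifting an N-bit integer by N or more bits is undefined
// behavior in C++, so these helpers defensively clamp that case to 0 instead
// of relying on whatever the hardware shift happens to produce.
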
template <typename T>
struct hasher
{
inline size_t operator() (const T& key) const { return static_cast<size_t>(key); }
};

template <typename T>
struct equal_to
{
inline bool operator()(const T& a, const T& b) const { return a == b; }
};

// Important: The Hasher and Equals objects must be bitwise movable!
template<typename Key, typename Value = empty_type, typename Hasher = hasher<Key>, typename Equals = equal_to<Key> >
class hash_map
{
public:
class iterator;
class const_iterator;

private:
friend class iterator;
friend class const_iterator;

enum state
{
cStateInvalid = 0,
cStateValid = 1
};

enum
{
cMinHashSize = 4U
};

public:
typedef hash_map<Key, Value, Hasher, Equals> hash_map_type;
typedef std::pair<Key, Value> value_type;
typedef Key key_type;
typedef Value referent_type;
typedef Hasher hasher_type;
typedef Equals equals_type;

hash_map() :
m_num_valid(0),
m_grow_threshold(0),
m_hash_shift(SIZE_T_BITS)
{
static_assert((SIZE_T_BITS == 32) || (SIZE_T_BITS == 64), "SIZE_T_BITS must be 32 or 64");
}

hash_map(const hash_map& other) :
m_values(other.m_values),
m_num_valid(other.m_num_valid),
m_grow_threshold(other.m_grow_threshold),
m_hash_shift(other.m_hash_shift),
m_hasher(other.m_hasher),
m_equals(other.m_equals)
{
static_assert((SIZE_T_BITS == 32) || (SIZE_T_BITS == 64), "SIZE_T_BITS must be 32 or 64");
}

hash_map(hash_map&& other) :
m_values(std::move(other.m_values)),
m_num_valid(other.m_num_valid),
m_grow_threshold(other.m_grow_threshold),
m_hash_shift(other.m_hash_shift),
m_hasher(std::move(other.m_hasher)),
m_equals(std::move(other.m_equals))
{
static_assert((SIZE_T_BITS == 32) || (SIZE_T_BITS == 64), "SIZE_T_BITS must be 32 or 64");

other.m_hash_shift = SIZE_T_BITS;
other.m_num_valid = 0;
other.m_grow_threshold = 0;
}

hash_map& operator= (const hash_map& other)
{
if (this == &other)
return *this;

clear();

m_values = other.m_values;
m_hash_shift = other.m_hash_shift;
m_num_valid = other.m_num_valid;
m_grow_threshold = other.m_grow_threshold;
m_hasher = other.m_hasher;
m_equals = other.m_equals;

return *this;
}

hash_map& operator= (hash_map&& other)
{
if (this == &other)
return *this;

clear();

m_values = std::move(other.m_values);
m_hash_shift = other.m_hash_shift;
m_num_valid = other.m_num_valid;
m_grow_threshold = other.m_grow_threshold;
m_hasher = std::move(other.m_hasher);
m_equals = std::move(other.m_equals);

other.m_hash_shift = SIZE_T_BITS;
other.m_num_valid = 0;
other.m_grow_threshold = 0;

return *this;
}

inline ~hash_map()
{
clear();
}

inline const Equals& get_equals() const { return m_equals; }
inline Equals& get_equals() { return m_equals; }
inline void set_equals(const Equals& equals) { m_equals = equals; }

inline const Hasher& get_hasher() const { return m_hasher; }
inline Hasher& get_hasher() { return m_hasher; }
inline void set_hasher(const Hasher& hasher) { m_hasher = hasher; }

inline void clear()
{
if (m_values.empty())
return;

if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
{
node* p = &get_node(0);
node* p_end = p + m_values.size();

size_t num_remaining = m_num_valid;
while (p != p_end)
{
if (p->state)
{
destruct_value_type(p);
num_remaining--;
if (!num_remaining)
break;
}

p++;
}
}

m_values.clear_no_destruction();

m_hash_shift = SIZE_T_BITS;
m_num_valid = 0;
m_grow_threshold = 0;
}

inline void reset()
{
if (!m_num_valid)
return;

if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
{
node* p = &get_node(0);
node* p_end = p + m_values.size();

size_t num_remaining = m_num_valid;
while (p != p_end)
{
if (p->state)
{
destruct_value_type(p);
p->state = cStateInvalid;

num_remaining--;
if (!num_remaining)
break;
}

p++;
}
}
else if (sizeof(node) <= 16)
{
memset(&m_values[0], 0, m_values.size_in_bytes());
}
else
{
node* p = &get_node(0);
node* p_end = p + m_values.size();

size_t num_remaining = m_num_valid;
while (p != p_end)
{
if (p->state)
{
p->state = cStateInvalid;

num_remaining--;
if (!num_remaining)
break;
}

p++;
}
}

m_num_valid = 0;
}

inline size_t size()
{
return m_num_valid;
}

inline size_t get_table_size()
{
return m_values.size();
}

inline bool empty()
{
return !m_num_valid;
}

inline bool reserve(size_t new_capacity)
{
if (!new_capacity)
return true;

uint64_t new_hash_size = new_capacity;

new_hash_size = new_hash_size * 2ULL;

if (!helpers::is_power_of_2(new_hash_size))
new_hash_size = helpers::next_pow2(new_hash_size);

new_hash_size = helpers::maximum<uint64_t>(cMinHashSize, new_hash_size);

if (!can_fit_into_size_t(new_hash_size))
{
assert(0);
return false;
}

assert(new_hash_size >= new_capacity);

if (new_hash_size <= m_values.size())
return true;

return rehash((size_t)new_hash_size);
}

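// Editorial note: reserve() doubles the requested capacity before rounding up
// to a power of 2 because rehash() sets m_grow_threshold to half the table
// size, i.e. the table grows once its load factor reaches 0.5.
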
class iterator
{
friend class hash_map<Key, Value, Hasher, Equals>;
friend class hash_map<Key, Value, Hasher, Equals>::const_iterator;

public:
inline iterator() : m_pTable(nullptr), m_index(0) { }
inline iterator(hash_map_type& table, size_t index) : m_pTable(&table), m_index(index) { }
inline iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }

inline iterator& operator= (const iterator& other)
{
m_pTable = other.m_pTable;
m_index = other.m_index;
return *this;
}

// post-increment
inline iterator operator++(int)
{
iterator result(*this);
++*this;
return result;
}

// pre-increment
inline iterator& operator++()
{
probe();
return *this;
}

inline value_type& operator*() const { return *get_cur(); }
inline value_type* operator->() const { return get_cur(); }

inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
inline bool operator != (const iterator& b) const { return !(*this == b); }
inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
inline bool operator != (const const_iterator& b) const { return !(*this == b); }

private:
hash_map_type* m_pTable;
size_t m_index;

inline value_type* get_cur() const
{
assert(m_pTable && (m_index < m_pTable->m_values.size()));
assert(m_pTable->get_node_state(m_index) == cStateValid);

return &m_pTable->get_node(m_index);
}

inline void probe()
{
assert(m_pTable);
m_index = m_pTable->find_next(m_index);
}
};

class const_iterator
{
friend class hash_map<Key, Value, Hasher, Equals>;
friend class hash_map<Key, Value, Hasher, Equals>::iterator;

public:
inline const_iterator() : m_pTable(nullptr), m_index(0) { }
inline const_iterator(const hash_map_type& table, size_t index) : m_pTable(&table), m_index(index) { }
inline const_iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }
inline const_iterator(const const_iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }

inline const_iterator& operator= (const const_iterator& other)
{
m_pTable = other.m_pTable;
m_index = other.m_index;
return *this;
}

inline const_iterator& operator= (const iterator& other)
{
m_pTable = other.m_pTable;
m_index = other.m_index;
return *this;
}

// post-increment
inline const_iterator operator++(int)
{
const_iterator result(*this);
++*this;
return result;
}

// pre-increment
inline const_iterator& operator++()
{
probe();
return *this;
}

inline const value_type& operator*() const { return *get_cur(); }
inline const value_type* operator->() const { return get_cur(); }

inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
inline bool operator != (const const_iterator& b) const { return !(*this == b); }
inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
inline bool operator != (const iterator& b) const { return !(*this == b); }

private:
const hash_map_type* m_pTable;
size_t m_index;

inline const value_type* get_cur() const
{
assert(m_pTable && (m_index < m_pTable->m_values.size()));
assert(m_pTable->get_node_state(m_index) == cStateValid);

return &m_pTable->get_node(m_index);
}

inline void probe()
{
assert(m_pTable);
m_index = m_pTable->find_next(m_index);
}
};

inline const_iterator begin() const
{
if (!m_num_valid)
return end();

return const_iterator(*this, find_next(std::numeric_limits<size_t>::max()));
}

inline const_iterator end() const
{
return const_iterator(*this, m_values.size());
}

inline iterator begin()
{
if (!m_num_valid)
return end();

return iterator(*this, find_next(std::numeric_limits<size_t>::max()));
}

inline iterator end()
{
return iterator(*this, m_values.size());
}

// insert_result.first will always point to inserted key/value (or the already existing key/value).
// insert_result.second will be true if a new key/value was inserted, or false if the key already existed (in which case first will point to the already existing value).
typedef std::pair<iterator, bool> insert_result;

inline insert_result insert(const Key& k, const Value& v = Value())
{
insert_result result;
if (!insert_no_grow(result, k, v))
{
if (!try_grow())
container_abort("hash_map::try_grow() failed");

// This must succeed.
if (!insert_no_grow(result, k, v))
container_abort("hash_map::insert() failed");
}

return result;
}

inline bool try_insert(insert_result& result, const Key& k, const Value& v = Value())
{
if (!insert_no_grow(result, k, v))
{
if (!try_grow())
return false;

if (!insert_no_grow(result, k, v))
return false;
}

return true;
}

inline insert_result insert(Key&& k, Value&& v = Value())
{
insert_result result;
if (!insert_no_grow_move(result, std::move(k), std::move(v)))
{
if (!try_grow())
container_abort("hash_map::try_grow() failed");

// This must succeed.
if (!insert_no_grow_move(result, std::move(k), std::move(v)))
container_abort("hash_map::insert() failed");
}

return result;
}

inline bool try_insert(insert_result& result, Key&& k, Value&& v = Value())
{
if (!insert_no_grow_move(result, std::move(k), std::move(v)))
{
if (!try_grow())
return false;

if (!insert_no_grow_move(result, std::move(k), std::move(v)))
return false;
}

return true;
}

inline insert_result insert(const value_type& v)
{
return insert(v.first, v.second);
}

inline bool try_insert(insert_result& result, const value_type& v)
{
return try_insert(result, v.first, v.second);
}

inline insert_result insert(value_type&& v)
{
return insert(std::move(v.first), std::move(v.second));
}

inline bool try_insert(insert_result& result, value_type&& v)
{
return try_insert(result, std::move(v.first), std::move(v.second));
}

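// Illustrative usage sketch (editorial, not part of the library):
//
//   basisu::hash_map<uint32_t, uint32_t> h;
//   auto res = h.insert(17, 100);   // res.second == true: newly inserted
//   res = h.insert(17, 200);        // res.second == false: key already present, value unchanged
//   res.first->second = 200;        // res.first points at the existing key/value pair
//   if (h.find(17) != h.end())
//      h.erase(17);
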
inline const_iterator find(const Key& k) const
{
return const_iterator(*this, find_index(k));
}

inline iterator find(const Key& k)
{
return iterator(*this, find_index(k));
}

inline bool contains(const Key& k) const
{
const size_t idx = find_index(k);
return idx != m_values.size();
}

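// Editorial note on erase() below: this is "backward shift" deletion for a
// linearly-probed open-addressed table (cf. Knuth, TAOCP vol. 3, the deletion
// algorithm for linear probing). After the slot is emptied, the probe chain is
// scanned, and any node whose home (hashed) slot the new hole would cut off
// from a lookup is moved into the hole; the process repeats until an empty
// slot terminates the chain. This avoids tombstone markers entirely.
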
inline bool erase(const Key& k)
{
size_t i = find_index(k);

if (i >= m_values.size())
return false;

node* pDst = &get_node(i);
destruct_value_type(pDst);
pDst->state = cStateInvalid;

m_num_valid--;

for (; ; )
{
size_t r, j = i;

node* pSrc = pDst;

do
{
if (!i)
{
i = m_values.size() - 1;
pSrc = &get_node(i);
}
else
{
i--;
pSrc--;
}

if (!pSrc->state)
return true;

r = hash_key(pSrc->first);

} while ((i <= r && r < j) || (r < j && j < i) || (j < i && i <= r));

move_node(pDst, pSrc);

pDst = pSrc;
}
}

inline void swap(hash_map_type& other)
{
m_values.swap(other.m_values);
std::swap(m_hash_shift, other.m_hash_shift);
std::swap(m_num_valid, other.m_num_valid);
std::swap(m_grow_threshold, other.m_grow_threshold);
std::swap(m_hasher, other.m_hasher);
std::swap(m_equals, other.m_equals);
}

private:
struct node : public value_type
{
uint8_t state;
};

static inline void construct_value_type(value_type* pDst, const Key& k, const Value& v)
{
if (BASISU_IS_BITWISE_COPYABLE(Key))
memcpy(&pDst->first, &k, sizeof(Key));
else
scalar_type<Key>::construct(&pDst->first, k);

if (BASISU_IS_BITWISE_COPYABLE(Value))
memcpy(&pDst->second, &v, sizeof(Value));
else
scalar_type<Value>::construct(&pDst->second, v);
}

static inline void construct_value_type(value_type* pDst, const value_type* pSrc)
{
if ((BASISU_IS_BITWISE_COPYABLE(Key)) && (BASISU_IS_BITWISE_COPYABLE(Value)))
{
memcpy(pDst, pSrc, sizeof(value_type));
}
else
{
if (BASISU_IS_BITWISE_COPYABLE(Key))
memcpy(&pDst->first, &pSrc->first, sizeof(Key));
else
scalar_type<Key>::construct(&pDst->first, pSrc->first);

if (BASISU_IS_BITWISE_COPYABLE(Value))
memcpy(&pDst->second, &pSrc->second, sizeof(Value));
else
scalar_type<Value>::construct(&pDst->second, pSrc->second);
}
}

static inline void destruct_value_type(value_type* p)
{
scalar_type<Key>::destruct(&p->first);
scalar_type<Value>::destruct(&p->second);
}

// Moves node *pSrc to *pDst efficiently (used both within a single hashmap and when rehashing into a new one).
// pDst should NOT be constructed on entry.
static inline void move_node(node* pDst, node* pSrc, bool update_src_state = true)
{
assert(!pDst->state);

if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key) && BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
{
memcpy(pDst, pSrc, sizeof(node));

assert(pDst->state == cStateValid);
}
else
{
if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key))
memcpy(&pDst->first, &pSrc->first, sizeof(Key));
else
{
new ((void*)&pDst->first) Key(std::move(pSrc->first));
scalar_type<Key>::destruct(&pSrc->first);
}

if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
memcpy(&pDst->second, &pSrc->second, sizeof(Value));
else
{
new ((void*)&pDst->second) Value(std::move(pSrc->second));
scalar_type<Value>::destruct(&pSrc->second);
}

pDst->state = cStateValid;
}

if (update_src_state)
pSrc->state = cStateInvalid;
}

struct raw_node
{
inline raw_node()
{
node* p = reinterpret_cast<node*>(this);
p->state = cStateInvalid;
}

// In practice, this should never be called (right?). We manage destruction ourselves.
inline ~raw_node()
{
node* p = reinterpret_cast<node*>(this);
if (p->state)
hash_map_type::destruct_value_type(p);
}

inline raw_node(const raw_node& other)
{
node* pDst = reinterpret_cast<node*>(this);
const node* pSrc = reinterpret_cast<const node*>(&other);

if (pSrc->state)
{
hash_map_type::construct_value_type(pDst, pSrc);
pDst->state = cStateValid;
}
else
pDst->state = cStateInvalid;
}

inline raw_node& operator= (const raw_node& rhs)
{
if (this == &rhs)
return *this;

node* pDst = reinterpret_cast<node*>(this);
const node* pSrc = reinterpret_cast<const node*>(&rhs);

if (pSrc->state)
{
if (pDst->state)
{
pDst->first = pSrc->first;
pDst->second = pSrc->second;
}
else
{
hash_map_type::construct_value_type(pDst, pSrc);
pDst->state = cStateValid;
}
}
else if (pDst->state)
{
hash_map_type::destruct_value_type(pDst);
pDst->state = cStateInvalid;
}

return *this;
}

uint8_t m_bits[sizeof(node)];
};

typedef basisu::vector<raw_node> node_vector;

node_vector m_values;

size_t m_num_valid;
size_t m_grow_threshold;

uint32_t m_hash_shift;

Hasher m_hasher;
Equals m_equals;

inline size_t hash_key(const Key& k) const
{
assert((safe_shift_left(static_cast<uint64_t>(1), (SIZE_T_BITS - m_hash_shift))) == m_values.size());

// Fibonacci hashing
if (SIZE_T_BITS == 32)
{
assert(m_hash_shift != 32);

uint32_t hash = static_cast<uint32_t>(m_hasher(k));
hash = (2654435769U * hash) >> m_hash_shift;

assert(hash < m_values.size());
return (size_t)hash;
}
else
{
assert(m_hash_shift != 64);

uint64_t hash = static_cast<uint64_t>(m_hasher(k));
hash = (0x9E3779B97F4A7C15ULL * hash) >> m_hash_shift;

assert(hash < m_values.size());
return (size_t)hash;
}
}

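// Editorial note: 2654435769 and 0x9E3779B97F4A7C15 are 2^32/phi and 2^64/phi
// (phi being the golden ratio), the classic Fibonacci-hashing multipliers.
// The multiply spreads entropy into the high bits, and >> m_hash_shift keeps
// only the top log2(table_size) bits, so even weak hashers (like the default
// identity hasher above) index the power-of-2 table reasonably well.
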
inline const node& get_node(size_t index) const
{
return *reinterpret_cast<const node*>(&m_values[index]);
}

inline node& get_node(size_t index)
{
return *reinterpret_cast<node*>(&m_values[index]);
}

inline state get_node_state(size_t index) const
{
return static_cast<state>(get_node(index).state);
}

inline void set_node_state(size_t index, bool valid)
{
get_node(index).state = valid;
}

inline bool try_grow()
{
uint64_t n = m_values.size() * 2ULL;

if (!helpers::is_power_of_2(n))
n = helpers::next_pow2(n);

if (!can_fit_into_size_t(n))
{
assert(0);
return false;
}

return rehash(helpers::maximum<size_t>(cMinHashSize, (size_t)n));
}

// new_hash_size must be a power of 2.
inline bool rehash(size_t new_hash_size)
{
if (!helpers::is_power_of_2((uint64_t)new_hash_size))
{
assert(0);
return false;
}

if (new_hash_size < m_num_valid)
{
assert(0);
return false;
}

if (new_hash_size == m_values.size())
return true;

hash_map new_map;
if (!new_map.m_values.try_resize(new_hash_size))
return false;

new_map.m_hash_shift = SIZE_T_BITS - helpers::floor_log2i((uint64_t)new_hash_size);
assert(new_hash_size == safe_shift_left(static_cast<uint64_t>(1), SIZE_T_BITS - new_map.m_hash_shift));

new_map.m_grow_threshold = std::numeric_limits<size_t>::max();

node* pNode = reinterpret_cast<node*>(m_values.begin());
node* pNode_end = pNode + m_values.size();

while (pNode != pNode_end)
{
if (pNode->state)
{
new_map.move_into(pNode);

if (new_map.m_num_valid == m_num_valid)
break;
}

pNode++;
}

new_map.m_grow_threshold = new_hash_size >> 1U;
if (new_hash_size & 1)
new_map.m_grow_threshold++;

m_values.clear_no_destruction();
m_hash_shift = SIZE_T_BITS;

swap(new_map);

return true;
}

inline size_t find_next(size_t index) const
{
index++;

if (index >= m_values.size())
return index;

const node* pNode = &get_node(index);

for (; ; )
{
if (pNode->state)
break;

if (++index >= m_values.size())
break;

pNode++;
}

return index;
}

inline size_t find_index(const Key& k) const
{
if (m_num_valid)
{
size_t index = hash_key(k);
const node* pNode = &get_node(index);

if (pNode->state)
{
if (m_equals(pNode->first, k))
return index;

const size_t orig_index = index;

for (; ; )
{
if (!index)
{
index = m_values.size() - 1;
pNode = &get_node(index);
}
else
{
index--;
pNode--;
}

if (index == orig_index)
break;

if (!pNode->state)
break;

if (m_equals(pNode->first, k))
return index;
}
}
}

return m_values.size();
}

inline bool insert_no_grow(insert_result& result, const Key& k, const Value& v)
{
if (!m_values.size())
return false;

size_t index = hash_key(k);
node* pNode = &get_node(index);

if (pNode->state)
{
if (m_equals(pNode->first, k))
{
result.first = iterator(*this, index);
result.second = false;
return true;
}

const size_t orig_index = index;

for (; ; )
{
if (!index)
{
index = m_values.size() - 1;
pNode = &get_node(index);
}
else
{
index--;
pNode--;
}

if (orig_index == index)
return false;

if (!pNode->state)
break;

if (m_equals(pNode->first, k))
{
result.first = iterator(*this, index);
result.second = false;
return true;
}
}
}

if (m_num_valid >= m_grow_threshold)
return false;

construct_value_type(pNode, k, v);

pNode->state = cStateValid;

m_num_valid++;
assert(m_num_valid <= m_values.size());

result.first = iterator(*this, index);
result.second = true;

return true;
}

// Move user supplied key/value into a node.
static inline void move_value_type(value_type* pDst, Key&& k, Value&& v)
{
// Not checking BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE here, because the caller may still destruct k and/or v afterwards (what state would we leave them in?).
if (BASISU_IS_BITWISE_COPYABLE(Key))
{
memcpy(&pDst->first, &k, sizeof(Key));
}
else
{
new ((void*)&pDst->first) Key(std::move(k));
// No destruction - user will do that (we don't own k).
}

if (BASISU_IS_BITWISE_COPYABLE(Value))
{
memcpy(&pDst->second, &v, sizeof(Value));
}
else
{
new ((void*)&pDst->second) Value(std::move(v));
// No destruction - user will do that (we don't own v).
}
}

// Insert user provided k/v, by moving, into the current hash table
inline bool insert_no_grow_move(insert_result& result, Key&& k, Value&& v)
{
if (!m_values.size())
return false;

size_t index = hash_key(k);
node* pNode = &get_node(index);

if (pNode->state)
{
if (m_equals(pNode->first, k))
{
result.first = iterator(*this, index);
result.second = false;
return true;
}

const size_t orig_index = index;

for (; ; )
{
if (!index)
{
index = m_values.size() - 1;
pNode = &get_node(index);
}
else
{
index--;
pNode--;
}

if (orig_index == index)
return false;

if (!pNode->state)
break;

if (m_equals(pNode->first, k))
{
result.first = iterator(*this, index);
result.second = false;
return true;
}
}
}

if (m_num_valid >= m_grow_threshold)
return false;

move_value_type(pNode, std::move(k), std::move(v));

pNode->state = cStateValid;

m_num_valid++;
assert(m_num_valid <= m_values.size());

result.first = iterator(*this, index);
result.second = true;

return true;
}

// Insert pNode by moving into the current hash table
inline void move_into(node* pNode)
{
size_t index = hash_key(pNode->first);
node* pDst_node = &get_node(index);

if (pDst_node->state)
{
const size_t orig_index = index;

for (; ; )
{
if (!index)
{
index = m_values.size() - 1;
pDst_node = &get_node(index);
}
else
{
index--;
pDst_node--;
}

if (index == orig_index)
{
assert(false);
return;
}

if (!pDst_node->state)
break;
}
}

// No need to update the source node's state (it's going away)
move_node(pDst_node, pNode, false);

m_num_valid++;
}
};

template<typename Key, typename Value, typename Hasher, typename Equals>
struct bitwise_movable< hash_map<Key, Value, Hasher, Equals> > { enum { cFlag = true }; };

#if BASISU_HASHMAP_TEST
extern void hash_map_test();
#endif

// String formatting
inline std::string string_format(const char* pFmt, ...)
{
char buf[2048];

va_list args;
va_start(args, pFmt);
#ifdef _WIN32
vsprintf_s(buf, sizeof(buf), pFmt, args);
#else
vsnprintf(buf, sizeof(buf), pFmt, args);
#endif
va_end(args);

return std::string(buf);
}

enum class variant_type
{
cInvalid,
cI32, cU32,
cI64, cU64,
cFlt, cDbl, cBool,
cStrPtr, cStdStr
};

struct fmt_variant
{
union
{
int32_t m_i32;
uint32_t m_u32;
int64_t m_i64;
uint64_t m_u64;
float m_flt;
double m_dbl;
bool m_bool;
const char* m_pStr;
};

std::string m_str;

variant_type m_type;

inline fmt_variant() :
m_u64(0),
m_type(variant_type::cInvalid)
{
}

inline fmt_variant(const fmt_variant& other) :
m_u64(other.m_u64),
m_str(other.m_str),
m_type(other.m_type)
{
}

inline fmt_variant(fmt_variant&& other) :
m_u64(other.m_u64),
m_str(std::move(other.m_str)),
m_type(other.m_type)
{
other.m_type = variant_type::cInvalid;
other.m_u64 = 0;
}

inline fmt_variant& operator= (fmt_variant&& other)
{
if (this == &other)
return *this;

m_type = other.m_type;
m_u64 = other.m_u64;
m_str = std::move(other.m_str);

other.m_type = variant_type::cInvalid;
other.m_u64 = 0;

return *this;
}

inline fmt_variant& operator= (const fmt_variant& rhs)
{
if (this == &rhs)
return *this;

m_u64 = rhs.m_u64;
m_type = rhs.m_type;
m_str = rhs.m_str;

return *this;
}

inline fmt_variant(int32_t v) : m_i32(v), m_type(variant_type::cI32) { }
inline fmt_variant(uint32_t v) : m_u32(v), m_type(variant_type::cU32) { }
inline fmt_variant(int64_t v) : m_i64(v), m_type(variant_type::cI64) { }
inline fmt_variant(uint64_t v) : m_u64(v), m_type(variant_type::cU64) { }
#ifdef _MSC_VER
inline fmt_variant(unsigned long v) : m_u64(v), m_type(variant_type::cU64) {}
inline fmt_variant(long v) : m_i64(v), m_type(variant_type::cI64) {}
#endif
inline fmt_variant(float v) : m_flt(v), m_type(variant_type::cFlt) { }
inline fmt_variant(double v) : m_dbl(v), m_type(variant_type::cDbl) { }
inline fmt_variant(const char* pStr) : m_pStr(pStr), m_type(variant_type::cStrPtr) { }
inline fmt_variant(const std::string& str) : m_u64(0), m_str(str), m_type(variant_type::cStdStr) { }
inline fmt_variant(bool val) : m_bool(val), m_type(variant_type::cBool) { }

bool to_string(std::string& res, std::string& fmt) const;
};

typedef basisu::vector<fmt_variant> fmt_variant_vec;

bool fmt_variants(std::string& res, const char* pFmt, const fmt_variant_vec& variants);

template <typename... Args>
inline bool fmt_string(std::string& res, const char* pFmt, Args&&... args)
{
return fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... });
}

template <typename... Args>
inline std::string fmt_string(const char* pFmt, Args&&... args)
{
std::string res;
fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... });
return res;
}

template <typename... Args>
inline int fmt_printf(const char* pFmt, Args&&... args)
{
std::string res;
if (!fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... }))
return EOF;

return fputs(res.c_str(), stdout);
}

template <typename... Args>
inline int fmt_fprintf(FILE* pFile, const char* pFmt, Args&&... args)
{
std::string res;
if (!fmt_variants(res, pFmt, fmt_variant_vec{ fmt_variant(std::forward<Args>(args))... }))
return EOF;

return fputs(res.c_str(), pFile);
}

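// Illustrative sketch (editorial, not part of the library). Each argument is
// wrapped in a fmt_variant; the placeholder syntax itself is defined by
// fmt_variants() in basisu_containers_impl.cpp (brace-style placeholders are
// assumed here), and the call pattern is simply:
//
//   std::string s = basisu::fmt_string("{} of {}", 1, 2);
//   basisu::fmt_printf("result: {}\n", s);
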
// fixed_array - zero initialized by default, operator[] is always bounds checked.
template <std::size_t N, typename T>
class fixed_array
{
static_assert(N >= 1, "fixed_array size must be at least 1");

public:
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using reference = T&;
using const_reference = const T&;
using pointer = T*;
using const_pointer = const T*;
using iterator = T*;
using const_iterator = const T*;

T m_data[N];

BASISU_FORCE_INLINE fixed_array()
{
initialize_array();
}

BASISU_FORCE_INLINE fixed_array(std::initializer_list<T> list)
{
assert(list.size() <= N);

std::size_t copy_size = std::min(list.size(), N);
std::copy_n(list.begin(), copy_size, m_data); // Copy up to min(list.size(), N)

if (list.size() < N)
{
// Initialize the rest of the array
std::fill(m_data + copy_size, m_data + N, T{});
}
}

BASISU_FORCE_INLINE T& operator[](std::size_t index)
{
if (index >= N)
container_abort("fixed_array: Index out of bounds.");
return m_data[index];
}

BASISU_FORCE_INLINE const T& operator[](std::size_t index) const
{
if (index >= N)
container_abort("fixed_array: Index out of bounds.");
return m_data[index];
}

BASISU_FORCE_INLINE T* begin() { return m_data; }
BASISU_FORCE_INLINE const T* begin() const { return m_data; }

BASISU_FORCE_INLINE T* end() { return m_data + N; }
BASISU_FORCE_INLINE const T* end() const { return m_data + N; }

BASISU_FORCE_INLINE const T* data() const { return m_data; }
BASISU_FORCE_INLINE T* data() { return m_data; }

BASISU_FORCE_INLINE const T& front() const { return m_data[0]; }
BASISU_FORCE_INLINE T& front() { return m_data[0]; }

BASISU_FORCE_INLINE const T& back() const { return m_data[N - 1]; }
BASISU_FORCE_INLINE T& back() { return m_data[N - 1]; }

BASISU_FORCE_INLINE constexpr std::size_t size() const { return N; }

BASISU_FORCE_INLINE void clear()
{
initialize_array(); // Reinitialize the array
}

BASISU_FORCE_INLINE void set_all(const T& value)
{
std::fill(m_data, m_data + N, value);
}

BASISU_FORCE_INLINE readable_span<T> get_readable_span() const
{
return readable_span<T>(m_data, N);
}

BASISU_FORCE_INLINE writable_span<T> get_writable_span()
{
return writable_span<T>(m_data, N);
}

private:
BASISU_FORCE_INLINE void initialize_array()
{
if constexpr (std::is_integral<T>::value || std::is_floating_point<T>::value)
memset(m_data, 0, sizeof(m_data));
else
std::fill(m_data, m_data + N, T{});
}

BASISU_FORCE_INLINE T& access_element(std::size_t index)
{
if (index >= N)
container_abort("fixed_array: Index out of bounds.");
return m_data[index];
}

BASISU_FORCE_INLINE const T& access_element(std::size_t index) const
{
if (index >= N)
container_abort("fixed_array: Index out of bounds.");
return m_data[index];
}
};

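// Illustrative sketch (editorial, not part of the library): unlike a plain
// C array, fixed_array zero-initializes its contents and operator[] is always
// bounds checked, even in release builds.
//
//   basisu::fixed_array<4, uint32_t> a;   // a = { 0, 0, 0, 0 }
//   a[2] = 5;
//   a[7] = 1;   // calls container_abort()
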
// 2D array

template<typename T>
class vector2D
{
typedef basisu::vector<T> vec_type;

uint32_t m_width, m_height;
vec_type m_values;

public:
vector2D() :
m_width(0),
m_height(0)
{
}

vector2D(uint32_t w, uint32_t h) :
m_width(0),
m_height(0)
{
resize(w, h);
}

vector2D(const vector2D& other)
{
*this = other;
}

vector2D(vector2D&& other) :
m_width(0),
m_height(0)
{
*this = std::move(other);
}

vector2D& operator= (const vector2D& other)
{
if (this != &other)
{
m_width = other.m_width;
m_height = other.m_height;
m_values = other.m_values;
}
return *this;
}

vector2D& operator= (vector2D&& other)
{
if (this != &other)
{
m_width = other.m_width;
m_height = other.m_height;
m_values = std::move(other.m_values);

other.m_width = 0;
other.m_height = 0;
}
return *this;
}

inline bool operator== (const vector2D& rhs) const
{
return (m_width == rhs.m_width) && (m_height == rhs.m_height) && (m_values == rhs.m_values);
}

inline size_t size_in_bytes() const { return m_values.size_in_bytes(); }

inline uint32_t get_width() const { return m_width; }
inline uint32_t get_height() const { return m_height; }

inline const T& operator() (uint32_t x, uint32_t y) const { assert(x < m_width && y < m_height); return m_values[x + y * m_width]; }
inline T& operator() (uint32_t x, uint32_t y) { assert(x < m_width && y < m_height); return m_values[x + y * m_width]; }

inline size_t size() const { return m_values.size(); }

inline const T& operator[] (uint32_t i) const { return m_values[i]; }
inline T& operator[] (uint32_t i) { return m_values[i]; }

inline const T& at_clamped(int x, int y) const { return (*this)(clamp<int>(x, 0, m_width - 1), clamp<int>(y, 0, m_height - 1)); }
inline T& at_clamped(int x, int y) { return (*this)(clamp<int>(x, 0, m_width - 1), clamp<int>(y, 0, m_height - 1)); }

void clear()
{
m_width = 0;
m_height = 0;
m_values.clear();
}

void set_all(const T& val)
{
vector_set_all(m_values, val);
}

inline const T* get_ptr() const { return m_values.data(); }
inline T* get_ptr() { return m_values.data(); }

vector2D& resize(uint32_t new_width, uint32_t new_height)
{
if ((m_width == new_width) && (m_height == new_height))
return *this;

const uint64_t total_vals = (uint64_t)new_width * new_height;

if (!can_fit_into_size_t(total_vals))
{
// What can we do?
assert(0);
return *this;
}

vec_type oldVals((size_t)total_vals);
oldVals.swap(m_values);

const uint32_t w = minimum(m_width, new_width);
const uint32_t h = minimum(m_height, new_height);

if ((w) && (h))
{
for (uint32_t y = 0; y < h; y++)
for (uint32_t x = 0; x < w; x++)
m_values[x + y * new_width] = oldVals[x + y * m_width];
}

m_width = new_width;
m_height = new_height;

return *this;
}

bool try_resize(uint32_t new_width, uint32_t new_height)
{
if ((m_width == new_width) && (m_height == new_height))
return true;

const uint64_t total_vals = (uint64_t)new_width * new_height;

if (!can_fit_into_size_t(total_vals))
{
// What can we do?
assert(0);
return false;
}

vec_type oldVals;
if (!oldVals.try_resize((size_t)total_vals))
return false;

oldVals.swap(m_values);

const uint32_t w = minimum(m_width, new_width);
const uint32_t h = minimum(m_height, new_height);

if ((w) && (h))
{
for (uint32_t y = 0; y < h; y++)
for (uint32_t x = 0; x < w; x++)
m_values[x + y * new_width] = oldVals[x + y * m_width];
}

m_width = new_width;
m_height = new_height;

return true;
}

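// Illustrative sketch (editorial, not part of the library): resize() copies
// the overlapping region into the new layout, so values in that region
// survive the reshape.
//
//   basisu::vector2D<int> g(2, 2);
//   g(1, 1) = 7;
//   g.resize(3, 3);
//   assert(g(1, 1) == 7);   // overlapping region preserved
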
const vector2D& extract_block_clamped(T* pDst, uint32_t src_x, uint32_t src_y, uint32_t w, uint32_t h) const
{
// HACK HACK
if (((src_x + w) > m_width) || ((src_y + h) > m_height))
{
// Slower clamping case
for (uint32_t y = 0; y < h; y++)
for (uint32_t x = 0; x < w; x++)
*pDst++ = at_clamped(src_x + x, src_y + y);
}
else
{
const T* pSrc = &m_values[src_x + src_y * m_width];

for (uint32_t y = 0; y < h; y++)
{
memcpy(pDst, pSrc, w * sizeof(T));
pSrc += m_width;
pDst += w;
}
}

return *this;
}
};

} // namespace basisu

namespace std
{
template<typename T>
inline void swap(basisu::vector<T>& a, basisu::vector<T>& b)
{
a.swap(b);
}

template<typename Key, typename Value, typename Hasher, typename Equals>
inline void swap(basisu::hash_map<Key, Value, Hasher, Equals>& a, basisu::hash_map<Key, Value, Hasher, Equals>& b)
{
a.swap(b);
}

} // namespace std