GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/meshoptimizer/indexgenerator.cpp
// This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
#include "meshoptimizer.h"

#include <assert.h>
#include <string.h>

// This work is based on:
// Matthias Teschner, Bruno Heidelberger, Matthias Mueller, Danat Pomeranets, Markus Gross. Optimized Spatial Hashing for Collision Detection of Deformable Objects. 2003
// John McDonald, Mark Kilgard. Crack-Free Point-Normal Triangles using Adjacent Edge Normals. 2010
// John Hable. Variable Rate Shading with Visibility Buffer Rendering. 2024
namespace meshopt
{

static unsigned int hashUpdate4(unsigned int h, const unsigned char* key, size_t len)
{
	// MurmurHash2
	const unsigned int m = 0x5bd1e995;
	const int r = 24;

	while (len >= 4)
	{
		unsigned int k = *reinterpret_cast<const unsigned int*>(key);

		k *= m;
		k ^= k >> r;
		k *= m;

		h *= m;
		h ^= k;

		key += 4;
		len -= 4;
	}

	return h;
}
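
// Note: the loop above is the MurmurHash2 block mix without the tail handling or
// final avalanche, so any trailing len % 4 bytes do not contribute to the hash.
// The hashers below that use it always confirm candidates with a full memcmp over
// the same range, so this only affects hash quality, never correctness.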

struct VertexHasher
{
	const unsigned char* vertices;
	size_t vertex_size;
	size_t vertex_stride;

	size_t hash(unsigned int index) const
	{
		return hashUpdate4(0, vertices + index * vertex_stride, vertex_size);
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		return memcmp(vertices + lhs * vertex_stride, vertices + rhs * vertex_stride, vertex_size) == 0;
	}
};

struct VertexStreamHasher
{
	const meshopt_Stream* streams;
	size_t stream_count;

	size_t hash(unsigned int index) const
	{
		unsigned int h = 0;

		for (size_t i = 0; i < stream_count; ++i)
		{
			const meshopt_Stream& s = streams[i];
			const unsigned char* data = static_cast<const unsigned char*>(s.data);

			h = hashUpdate4(h, data + index * s.stride, s.size);
		}

		return h;
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		for (size_t i = 0; i < stream_count; ++i)
		{
			const meshopt_Stream& s = streams[i];
			const unsigned char* data = static_cast<const unsigned char*>(s.data);

			if (memcmp(data + lhs * s.stride, data + rhs * s.stride, s.size) != 0)
				return false;
		}

		return true;
	}
};

struct VertexCustomHasher
{
	const float* vertex_positions;
	size_t vertex_stride_float;

	int (*callback)(void*, unsigned int, unsigned int);
	void* context;

	size_t hash(unsigned int index) const
	{
		const unsigned int* key = reinterpret_cast<const unsigned int*>(vertex_positions + index * vertex_stride_float);

		unsigned int x = key[0], y = key[1], z = key[2];

		// replace negative zero with zero
		x = (x == 0x80000000) ? 0 : x;
		y = (y == 0x80000000) ? 0 : y;
		z = (z == 0x80000000) ? 0 : z;

		// scramble bits to make sure that integer coordinates have entropy in lower bits
		x ^= x >> 17;
		y ^= y >> 17;
		z ^= z >> 17;

		// Optimized Spatial Hashing for Collision Detection of Deformable Objects
		return (x * 73856093) ^ (y * 19349663) ^ (z * 83492791);
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		const float* lp = vertex_positions + lhs * vertex_stride_float;
		const float* rp = vertex_positions + rhs * vertex_stride_float;

		if (lp[0] != rp[0] || lp[1] != rp[1] || lp[2] != rp[2])
			return false;

		return callback ? callback(context, lhs, rhs) : true;
	}
};

struct EdgeHasher
{
	const unsigned int* remap;

	size_t hash(unsigned long long edge) const
	{
		unsigned int e0 = unsigned(edge >> 32);
		unsigned int e1 = unsigned(edge);

		unsigned int h1 = remap[e0];
		unsigned int h2 = remap[e1];

		const unsigned int m = 0x5bd1e995;

		// MurmurHash64B finalizer
		h1 ^= h2 >> 18;
		h1 *= m;
		h2 ^= h1 >> 22;
		h2 *= m;
		h1 ^= h2 >> 17;
		h1 *= m;
		h2 ^= h1 >> 19;
		h2 *= m;

		return h2;
	}

	bool equal(unsigned long long lhs, unsigned long long rhs) const
	{
		unsigned int l0 = unsigned(lhs >> 32);
		unsigned int l1 = unsigned(lhs);

		unsigned int r0 = unsigned(rhs >> 32);
		unsigned int r1 = unsigned(rhs);

		return remap[l0] == remap[r0] && remap[l1] == remap[r1];
	}
};

static size_t hashBuckets(size_t count)
{
	size_t buckets = 1;
	while (buckets < count + count / 4)
		buckets *= 2;

	return buckets;
}
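
// Note: hashBuckets returns the smallest power of two that is at least
// count + count / 4, so the tables below run at a load factor of roughly 0.8 or
// less and hashLookup below always reaches an empty slot before its probe loop
// is exhausted.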

template <typename T, typename Hash>
static T* hashLookup(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
{
	assert(buckets > 0);
	assert((buckets & (buckets - 1)) == 0);

	size_t hashmod = buckets - 1;
	size_t bucket = hash.hash(key) & hashmod;

	for (size_t probe = 0; probe <= hashmod; ++probe)
	{
		T& item = table[bucket];

		if (item == empty)
			return &item;

		if (hash.equal(item, key))
			return &item;

		// hash collision, quadratic probing
		bucket = (bucket + probe + 1) & hashmod;
	}

	assert(false && "Hash table is full"); // unreachable
	return NULL;
}

static void buildPositionRemap(unsigned int* remap, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, meshopt_Allocator& allocator)
{
	VertexHasher vertex_hasher = {reinterpret_cast<const unsigned char*>(vertex_positions), 3 * sizeof(float), vertex_positions_stride};

	size_t vertex_table_size = hashBuckets(vertex_count);
	unsigned int* vertex_table = allocator.allocate<unsigned int>(vertex_table_size);
	memset(vertex_table, -1, vertex_table_size * sizeof(unsigned int));

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int index = unsigned(i);
		unsigned int* entry = hashLookup(vertex_table, vertex_table_size, vertex_hasher, index, ~0u);

		if (*entry == ~0u)
			*entry = index;

		remap[index] = *entry;
	}

	allocator.deallocate(vertex_table);
}

template <typename Hash>
static size_t generateVertexRemap(unsigned int* remap, const unsigned int* indices, size_t index_count, size_t vertex_count, const Hash& hash, meshopt_Allocator& allocator)
{
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	unsigned int next_vertex = 0;

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(index < vertex_count);

		if (remap[index] != ~0u)
			continue;

		unsigned int* entry = hashLookup(table, table_size, hash, index, ~0u);

		if (*entry == ~0u)
		{
			*entry = index;
			remap[index] = next_vertex++;
		}
		else
		{
			assert(remap[*entry] != ~0u);
			remap[index] = remap[*entry];
		}
	}

	assert(next_vertex <= vertex_count);
	return next_vertex;
}

template <size_t BlockSize>
static void remapVertices(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
{
	size_t block_size = BlockSize == 0 ? vertex_size : BlockSize;
	assert(block_size == vertex_size);

	for (size_t i = 0; i < vertex_count; ++i)
		if (remap[i] != ~0u)
		{
			assert(remap[i] < vertex_count);
			memcpy(static_cast<unsigned char*>(destination) + remap[i] * block_size, static_cast<const unsigned char*>(vertices) + i * block_size, block_size);
		}
}

template <typename Hash>
static void generateShadowBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const Hash& hash, meshopt_Allocator& allocator)
{
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		if (remap[index] == ~0u)
		{
			unsigned int* entry = hashLookup(table, table_size, hash, index, ~0u);

			if (*entry == ~0u)
				*entry = index;

			remap[index] = *entry;
		}

		destination[i] = remap[index];
	}
}

} // namespace meshopt

size_t meshopt_generateVertexRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(!indices || index_count % 3 == 0);
	assert(vertex_size > 0 && vertex_size <= 256);

	meshopt_Allocator allocator;
	VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_size};

	return generateVertexRemap(destination, indices, index_count, vertex_count, hasher, allocator);
}
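
// Usage sketch (illustrative, not part of the library; `Vertex`, `vertices` and
// `indices` stand in for application data):
//
//   std::vector<unsigned int> remap(vertex_count);
//   size_t unique = meshopt_generateVertexRemap(remap.data(), indices, index_count, vertices, vertex_count, sizeof(Vertex));
//
//   meshopt_remapIndexBuffer(indices, indices, index_count, remap.data());
//   meshopt_remapVertexBuffer(vertices, vertices, vertex_count, sizeof(Vertex), remap.data());
//
// After remapping, only the first `unique` vertices are referenced by the index buffer.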

size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(index_count % 3 == 0);
	assert(stream_count > 0 && stream_count <= 16);

	for (size_t i = 0; i < stream_count; ++i)
	{
		assert(streams[i].size > 0 && streams[i].size <= 256);
		assert(streams[i].size <= streams[i].stride);
	}

	meshopt_Allocator allocator;
	VertexStreamHasher hasher = {streams, stream_count};

	return generateVertexRemap(destination, indices, index_count, vertex_count, hasher, allocator);
}
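
// Usage sketch (illustrative; `positions` and `normals` are hypothetical
// de-interleaved application arrays, each tightly packed):
//
//   meshopt_Stream streams[] = {
//       {positions, sizeof(float) * 3, sizeof(float) * 3},
//       {normals, sizeof(float) * 3, sizeof(float) * 3},
//   };
//
//   std::vector<unsigned int> remap(vertex_count);
//   size_t unique = meshopt_generateVertexRemapMulti(remap.data(), indices, index_count, vertex_count, streams, 2);
//
// Each stream is then remapped separately with meshopt_remapVertexBuffer.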

size_t meshopt_generateVertexRemapCustom(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, int (*callback)(void*, unsigned int, unsigned int), void* context)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(!indices || index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;
	VertexCustomHasher hasher = {vertex_positions, vertex_positions_stride / sizeof(float), callback, context};

	return generateVertexRemap(destination, indices, index_count, vertex_count, hasher, allocator);
}
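
// Usage sketch (illustrative; the callback and its context are hypothetical). The
// callback is only invoked for vertex pairs whose positions already compare equal,
// and returns non-zero to confirm the two vertices may be merged:
//
//   int same_material(void* context, unsigned int a, unsigned int b)
//   {
//       int* material_ids = static_cast<int*>(context);
//       return material_ids[a] == material_ids[b];
//   }
//
//   size_t unique = meshopt_generateVertexRemapCustom(remap, indices, index_count,
//       positions, vertex_count, sizeof(float) * 3, same_material, material_ids);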

void meshopt_remapVertexBuffer(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
{
	using namespace meshopt;

	assert(vertex_size > 0 && vertex_size <= 256);

	meshopt_Allocator allocator;

	// support in-place remap
	if (destination == vertices)
	{
		unsigned char* vertices_copy = allocator.allocate<unsigned char>(vertex_count * vertex_size);
		memcpy(vertices_copy, vertices, vertex_count * vertex_size);
		vertices = vertices_copy;
	}

	// specialize the loop for common vertex sizes to ensure memcpy is compiled as an inlined intrinsic
	switch (vertex_size)
	{
	case 4:
		return remapVertices<4>(destination, vertices, vertex_count, vertex_size, remap);

	case 8:
		return remapVertices<8>(destination, vertices, vertex_count, vertex_size, remap);

	case 12:
		return remapVertices<12>(destination, vertices, vertex_count, vertex_size, remap);

	case 16:
		return remapVertices<16>(destination, vertices, vertex_count, vertex_size, remap);

	default:
		return remapVertices<0>(destination, vertices, vertex_count, vertex_size, remap);
	}
}

void meshopt_remapIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const unsigned int* remap)
{
	assert(index_count % 3 == 0);

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(remap[index] != ~0u);

		destination[i] = remap[index];
	}
}

void meshopt_generateShadowIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
{
	using namespace meshopt;

	assert(indices);
	assert(index_count % 3 == 0);
	assert(vertex_size > 0 && vertex_size <= 256);
	assert(vertex_size <= vertex_stride);

	meshopt_Allocator allocator;
	VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_stride};

	generateShadowBuffer(destination, indices, index_count, vertex_count, hasher, allocator);
}
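
// Usage sketch (illustrative; assumes interleaved vertices that begin with a float3
// position, so position alone determines equality for depth-only rendering):
//
//   std::vector<unsigned int> shadow_indices(index_count);
//   meshopt_generateShadowIndexBuffer(shadow_indices.data(), indices, index_count, vertices, vertex_count, sizeof(float) * 3, sizeof(Vertex));
//
// The result indexes the original vertex buffer, but all indices that refer to
// position-identical vertices point at one representative, improving vertex cache
// reuse in depth/shadow passes.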

void meshopt_generateShadowIndexBufferMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
{
	using namespace meshopt;

	assert(indices);
	assert(index_count % 3 == 0);
	assert(stream_count > 0 && stream_count <= 16);

	for (size_t i = 0; i < stream_count; ++i)
	{
		assert(streams[i].size > 0 && streams[i].size <= 256);
		assert(streams[i].size <= streams[i].stride);
	}

	meshopt_Allocator allocator;
	VertexStreamHasher hasher = {streams, stream_count};

	generateShadowBuffer(destination, indices, index_count, vertex_count, hasher, allocator);
}

void meshopt_generateAdjacencyIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	static const int next[4] = {1, 2, 0, 1};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	unsigned int* edge_vertex_table = allocator.allocate<unsigned int>(edge_table_size);

	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));
	memset(edge_vertex_table, -1, edge_table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			unsigned int i2 = indices[i + next[e + 1]];
			assert(i0 < vertex_count && i1 < vertex_count && i2 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
			{
				*entry = edge;

				// store vertex opposite to the edge
				edge_vertex_table[entry - edge_table] = i2;
			}
		}
	}

	// build resulting index buffer: 6 indices for each input triangle
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[6];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long* oppe = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			patch[e * 2 + 0] = i0;
			patch[e * 2 + 1] = (*oppe == ~0ull) ? i0 : edge_vertex_table[oppe - edge_table];
		}

		memcpy(destination + i * 2, patch, sizeof(patch));
	}
}
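
// Usage sketch (illustrative): the 6 indices emitted per input triangle follow the
// triangles-with-adjacency convention (even slots are the triangle corners, odd
// slots the vertices opposite each edge), suitable for geometry shaders fed with
// adjacency primitives such as GL_TRIANGLES_ADJACENCY:
//
//   std::vector<unsigned int> adjacency(index_count * 2);
//   meshopt_generateAdjacencyIndexBuffer(adjacency.data(), indices, index_count, positions, vertex_count, sizeof(float) * 3);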

void meshopt_generateTessellationIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	static const int next[3] = {1, 2, 0};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));

	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
				*entry = edge;
		}
	}

	// build resulting index buffer: 12 indices for each input triangle
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[12];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long oppe = *hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			// use the same edge if opposite edge doesn't exist (border)
			oppe = (oppe == ~0ull) ? edge : oppe;

			// triangle index (0, 1, 2)
			patch[e] = i0;

			// opposite edge (3, 4; 5, 6; 7, 8)
			patch[3 + e * 2 + 0] = unsigned(oppe);
			patch[3 + e * 2 + 1] = unsigned(oppe >> 32);

			// dominant vertex (9, 10, 11)
			patch[9 + e] = remap[i0];
		}

		memcpy(destination + i * 4, patch, sizeof(patch));
	}
}
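
// Usage sketch (illustrative): the 12 indices emitted per input triangle form the
// control points for crack-free PN-AEN style tessellation (see the McDonald/Kilgard
// paper referenced at the top of this file); slots 0-2 are the triangle corners,
// 3-8 the three opposite edges, and 9-11 the position-dominant vertices:
//
//   std::vector<unsigned int> tess(index_count * 4);
//   meshopt_generateTessellationIndexBuffer(tess.data(), indices, index_count, positions, vertex_count, sizeof(float) * 3);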

size_t meshopt_generateProvokingIndexBuffer(unsigned int* destination, unsigned int* reorder, const unsigned int* indices, size_t index_count, size_t vertex_count)
{
	assert(index_count % 3 == 0);

	meshopt_Allocator allocator;

	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	// compute vertex valence; this is used to prioritize least used corner
	// note: we use 8-bit counters for performance; for outlier vertices the valence is incorrect but that just affects the heuristic
	unsigned char* valence = allocator.allocate<unsigned char>(vertex_count);
	memset(valence, 0, vertex_count);

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		valence[index]++;
	}

	unsigned int reorder_offset = 0;

	// assign provoking vertices; leave the rest for the next pass
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int a = indices[i + 0], b = indices[i + 1], c = indices[i + 2];
		assert(a < vertex_count && b < vertex_count && c < vertex_count);

		// try to rotate triangle such that provoking vertex hasn't been seen before
		// if multiple vertices are new, prioritize the one with least valence
		// this reduces the risk that a future triangle will have all three vertices seen
		unsigned int va = remap[a] == ~0u ? valence[a] : ~0u;
		unsigned int vb = remap[b] == ~0u ? valence[b] : ~0u;
		unsigned int vc = remap[c] == ~0u ? valence[c] : ~0u;

		if (vb != ~0u && vb <= va && vb <= vc)
		{
			// abc -> bca
			unsigned int t = a;
			a = b, b = c, c = t;
		}
		else if (vc != ~0u && vc <= va && vc <= vb)
		{
			// abc -> cab
			unsigned int t = c;
			c = b, b = a, a = t;
		}

		unsigned int newidx = reorder_offset;

		// now remap[a] = ~0u or all three vertices are old
		// recording remap[a] makes it possible to remap future references to the same index, conserving space
		if (remap[a] == ~0u)
			remap[a] = newidx;

		// we need to clone the provoking vertex to get a unique index
		// if all three are used the choice is arbitrary since no future triangle will be able to reuse any of these
		reorder[reorder_offset++] = a;

		// note: first vertex is final, the other two will be fixed up in next pass
		destination[i + 0] = newidx;
		destination[i + 1] = b;
		destination[i + 2] = c;

		// update vertex valences for corner heuristic
		valence[a]--;
		valence[b]--;
		valence[c]--;
	}

	// remap or clone non-provoking vertices (iterating to skip provoking vertices)
	int step = 1;

	for (size_t i = 1; i < index_count; i += step, step ^= 3)
	{
		unsigned int index = destination[i];

		if (remap[index] == ~0u)
		{
			// we haven't seen the vertex before as a provoking vertex
			// to maintain the reference to the original vertex we need to clone it
			unsigned int newidx = reorder_offset;

			remap[index] = newidx;
			reorder[reorder_offset++] = index;
		}

		destination[i] = remap[index];
	}

	assert(reorder_offset <= vertex_count + index_count / 3);
	return reorder_offset;
}
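
// Usage sketch (illustrative; buffer sizes follow the worst-case bound asserted above):
//
//   std::vector<unsigned int> provoking(index_count);
//   std::vector<unsigned int> reorder(vertex_count + index_count / 3);
//   size_t new_vertex_count = meshopt_generateProvokingIndexBuffer(provoking.data(), reorder.data(), indices, index_count, vertex_count);
//
// Every output triangle i now has a unique provoking (first) vertex index
// provoking[3 * i], and reorder maps each new vertex index back to the original
// vertex it was cloned from, so the caller can rebuild the vertex buffer by
// gathering: new_vertices[j] = old_vertices[reorder[j]] for j < new_vertex_count.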