GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/meshoptimizer/partition.cpp

// This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
#include "meshoptimizer.h"

#include <assert.h>
#include <math.h>
#include <string.h>

// This work is based on:
// Takio Kurita. An efficient agglomerative clustering algorithm using a heap. 1991
namespace meshopt
{

struct ClusterAdjacency
{
	unsigned int* offsets;
	unsigned int* clusters;
	unsigned int* shared;
};
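
// Adjacency is stored in CSR form: entries for cluster i occupy [offsets[i], offsets[i + 1]),
// with clusters[] holding the adjacent cluster ids and shared[] the number of shared vertices.
// Illustrative sketch (not part of the library) of walking the neighbors of cluster c:
//
//	for (unsigned int a = adjacency.offsets[c]; a < adjacency.offsets[c + 1]; ++a)
//		visit(adjacency.clusters[a], adjacency.shared[a]); // visit() is a hypothetical callback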

static void filterClusterIndices(unsigned int* data, unsigned int* offsets, const unsigned int* cluster_indices, const unsigned int* cluster_index_counts, size_t cluster_count, unsigned char* used, size_t vertex_count, size_t total_index_count)
{
	(void)vertex_count;
	(void)total_index_count;

	size_t cluster_start = 0;
	size_t cluster_write = 0;

	for (size_t i = 0; i < cluster_count; ++i)
	{
		offsets[i] = unsigned(cluster_write);

		// copy cluster indices, skipping duplicates
		for (size_t j = 0; j < cluster_index_counts[i]; ++j)
		{
			unsigned int v = cluster_indices[cluster_start + j];
			assert(v < vertex_count);

			data[cluster_write] = v;
			cluster_write += 1 - used[v];
			used[v] = 1;
		}

		// reset used flags for the next cluster
		for (size_t j = offsets[i]; j < cluster_write; ++j)
			used[data[j]] = 0;

		cluster_start += cluster_index_counts[i];
	}

	assert(cluster_start == total_index_count);
	assert(cluster_write <= total_index_count);
	offsets[cluster_count] = unsigned(cluster_write);
}
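
// For example, a cluster with triangle indices {0, 1, 2, 2, 1, 3} is stored as {0, 1, 2, 3}:
// each index is written unconditionally, but cluster_write only advances when used[v] was 0,
// so duplicates are overwritten by the next unique index. (Example indices are illustrative.)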

static void computeClusterBounds(float* cluster_bounds, const unsigned int* cluster_indices, const unsigned int* cluster_offsets, size_t cluster_count, const float* vertex_positions, size_t vertex_positions_stride)
{
	size_t vertex_stride_float = vertex_positions_stride / sizeof(float);

	for (size_t i = 0; i < cluster_count; ++i)
	{
		float center[3] = {0, 0, 0};

		// approximate center of the cluster by averaging all vertex positions
		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
		{
			const float* p = vertex_positions + cluster_indices[j] * vertex_stride_float;

			center[0] += p[0];
			center[1] += p[1];
			center[2] += p[2];
		}

		// note: technically clusters can't be empty per meshopt_partitionClusters, but we guard against division by zero in case that changes
		if (size_t cluster_size = cluster_offsets[i + 1] - cluster_offsets[i])
		{
			center[0] /= float(cluster_size);
			center[1] /= float(cluster_size);
			center[2] /= float(cluster_size);
		}

		// compute the radius of the bounding sphere for each cluster
		float radiussq = 0;

		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
		{
			const float* p = vertex_positions + cluster_indices[j] * vertex_stride_float;

			float d2 = (p[0] - center[0]) * (p[0] - center[0]) + (p[1] - center[1]) * (p[1] - center[1]) + (p[2] - center[2]) * (p[2] - center[2]);

			radiussq = radiussq < d2 ? d2 : radiussq;
		}

		cluster_bounds[i * 4 + 0] = center[0];
		cluster_bounds[i * 4 + 1] = center[1];
		cluster_bounds[i * 4 + 2] = center[2];
		cluster_bounds[i * 4 + 3] = sqrtf(radiussq);
	}
}
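
// Note: the sphere is centered at the vertex centroid rather than at the minimal enclosing
// sphere center, so the radius is conservative (it may be larger than strictly necessary).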

static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned int* cluster_indices, const unsigned int* cluster_offsets, size_t cluster_count, size_t vertex_count, meshopt_Allocator& allocator)
{
	unsigned int* ref_offsets = allocator.allocate<unsigned int>(vertex_count + 1);

	// compute the number of clusters referenced by each vertex
	memset(ref_offsets, 0, vertex_count * sizeof(unsigned int));

	for (size_t i = 0; i < cluster_count; ++i)
	{
		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
			ref_offsets[cluster_indices[j]]++;
	}

	// compute the (worst-case) number of adjacent clusters for each cluster
	size_t total_adjacency = 0;

	for (size_t i = 0; i < cluster_count; ++i)
	{
		size_t count = 0;

		// worst case is when every vertex has a disjoint cluster list
		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
			count += ref_offsets[cluster_indices[j]] - 1;

		// ... but no more than cluster_count - 1 clusters can be adjacent in the end
		total_adjacency += count < cluster_count - 1 ? count : cluster_count - 1;
	}

	// we can now allocate adjacency buffers
	adjacency.offsets = allocator.allocate<unsigned int>(cluster_count + 1);
	adjacency.clusters = allocator.allocate<unsigned int>(total_adjacency);
	adjacency.shared = allocator.allocate<unsigned int>(total_adjacency);

	// convert ref counts to offsets
	size_t total_refs = 0;

	for (size_t i = 0; i < vertex_count; ++i)
	{
		size_t count = ref_offsets[i];
		ref_offsets[i] = unsigned(total_refs);
		total_refs += count;
	}

	unsigned int* ref_data = allocator.allocate<unsigned int>(total_refs);

	// fill cluster refs for each vertex
	for (size_t i = 0; i < cluster_count; ++i)
	{
		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
			ref_data[ref_offsets[cluster_indices[j]]++] = unsigned(i);
	}

	// after the previous pass, ref_offsets contains the end of the data for each vertex; shift it forward to get the start
	memmove(ref_offsets + 1, ref_offsets, vertex_count * sizeof(unsigned int));
	ref_offsets[0] = 0;

	// fill cluster adjacency for each cluster...
	adjacency.offsets[0] = 0;

	for (size_t i = 0; i < cluster_count; ++i)
	{
		unsigned int* adj = adjacency.clusters + adjacency.offsets[i];
		unsigned int* shd = adjacency.shared + adjacency.offsets[i];
		size_t count = 0;

		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
		{
			unsigned int v = cluster_indices[j];

			// merge the entire cluster list of each vertex into the current list
			for (size_t k = ref_offsets[v]; k < ref_offsets[v + 1]; ++k)
			{
				unsigned int c = ref_data[k];
				assert(c < cluster_count);

				if (c == unsigned(i))
					continue;

				// if the cluster is already in the list, increment the shared count
				bool found = false;
				for (size_t l = 0; l < count; ++l)
					if (adj[l] == c)
					{
						found = true;
						shd[l]++;
						break;
					}

				// ... or append a new cluster
				if (!found)
				{
					adj[count] = c;
					shd[count] = 1;
					count++;
				}
			}
		}

		// mark the end of the adjacency list; the next cluster will start there as well
		adjacency.offsets[i + 1] = adjacency.offsets[i] + unsigned(count);
	}

	assert(adjacency.offsets[cluster_count] <= total_adjacency);

	// ref_offsets can't be deallocated as it was allocated before adjacency
	allocator.deallocate(ref_data);
}

struct ClusterGroup
{
	int group;
	int next;
	unsigned int size; // 0 unless root
	unsigned int vertices;
};
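
// A group is an intrusive singly linked list of clusters chained through `next` (terminated by -1);
// `group` holds the id of the list root (or -1 while the group is being processed or has been emitted),
// and `size` / `vertices` are only meaningful on the root entry.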

struct GroupOrder
{
	unsigned int id;
	int order;
};

static void heapPush(GroupOrder* heap, size_t size, GroupOrder item)
{
	// insert a new element at the end (breaks heap invariant)
	heap[size++] = item;

	// bubble up the new element to its correct position
	size_t i = size - 1;
	while (i > 0 && heap[i].order < heap[(i - 1) / 2].order)
	{
		size_t p = (i - 1) / 2;

		GroupOrder temp = heap[i];
		heap[i] = heap[p];
		heap[p] = temp;
		i = p;
	}
}

static GroupOrder heapPop(GroupOrder* heap, size_t size)
{
	assert(size > 0);
	GroupOrder top = heap[0];

	// move the last element to the top (breaks heap invariant)
	heap[0] = heap[--size];

	// bubble down the new top element to its correct position
	size_t i = 0;
	while (i * 2 + 1 < size)
	{
		// find the smallest child
		size_t j = i * 2 + 1;
		j += (j + 1 < size && heap[j + 1].order < heap[j].order);

		// if the parent is already smaller than both children, we're done
		if (heap[j].order >= heap[i].order)
			break;

		// otherwise, swap the parent and child and continue
		GroupOrder temp = heap[i];
		heap[i] = heap[j];
		heap[j] = temp;
		i = j;
	}

	return top;
}
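
// These helpers implement a binary min-heap keyed on order; the caller owns the element count
// and passes the pre-update size, as in this illustrative sketch (it mirrors the usage further down):
//
//	heapPush(order, pending++, item);           // push: pass the old size, then grow the count
//	GroupOrder top = heapPop(order, pending--); // pop: pass the old size, then shrink the count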

static unsigned int countShared(const ClusterGroup* groups, int group1, int group2, const ClusterAdjacency& adjacency)
{
	unsigned int total = 0;

	for (int i1 = group1; i1 >= 0; i1 = groups[i1].next)
		for (int i2 = group2; i2 >= 0; i2 = groups[i2].next)
		{
			for (unsigned int adj = adjacency.offsets[i1]; adj < adjacency.offsets[i1 + 1]; ++adj)
				if (adjacency.clusters[adj] == unsigned(i2))
				{
					total += adjacency.shared[adj];
					break;
				}
		}

	return total;
}

static void mergeBounds(float* target, const float* source)
{
	float r1 = target[3], r2 = source[3];
	float dx = source[0] - target[0], dy = source[1] - target[1], dz = source[2] - target[2];
	float d = sqrtf(dx * dx + dy * dy + dz * dz);

	if (d + r1 < r2)
	{
		memcpy(target, source, 4 * sizeof(float));
		return;
	}

	if (d + r2 > r1)
	{
		float k = d > 0 ? (d + r2 - r1) / (2 * d) : 0.f;

		target[0] += dx * k;
		target[1] += dy * k;
		target[2] += dz * k;
		target[3] = (d + r2 + r1) / 2;
	}
}
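
// mergeBounds grows target to enclose source: if one sphere already contains the other it is kept
// as is (or copied over), otherwise the union has radius (d + r1 + r2) / 2 and its center moves
// along the segment between the two centers by the fraction k = (d + r2 - r1) / (2 * d).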

static float boundsScore(const float* target, const float* source)
{
	float r1 = target[3], r2 = source[3];
	float dx = source[0] - target[0], dy = source[1] - target[1], dz = source[2] - target[2];
	float d = sqrtf(dx * dx + dy * dy + dz * dz);

	float mr = d + r1 < r2 ? r2 : (d + r2 < r1 ? r1 : (d + r2 + r1) / 2);

	return mr > 0 ? r1 / mr : 0.f;
}
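
// boundsScore returns r1 divided by the radius of the merged sphere, in [0, 1]: values close to 1
// mean merging source into target barely grows the target's bounds, so nearby groups score higher.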

static int pickGroupToMerge(const ClusterGroup* groups, int id, const ClusterAdjacency& adjacency, size_t max_partition_size, const float* cluster_bounds)
{
	assert(groups[id].size > 0);

	float group_rsqrt = 1.f / sqrtf(float(int(groups[id].vertices)));

	int best_group = -1;
	float best_score = 0;

	for (int ci = id; ci >= 0; ci = groups[ci].next)
	{
		for (unsigned int adj = adjacency.offsets[ci]; adj != adjacency.offsets[ci + 1]; ++adj)
		{
			int other = groups[adjacency.clusters[adj]].group;
			if (other < 0)
				continue;

			assert(groups[other].size > 0);
			if (groups[id].size + groups[other].size > max_partition_size)
				continue;

			unsigned int shared = countShared(groups, id, other, adjacency);
			float other_rsqrt = 1.f / sqrtf(float(int(groups[other].vertices)));

			// normalize shared count by the expected boundary of each group (+ keeps scoring symmetric)
			float score = float(int(shared)) * (group_rsqrt + other_rsqrt);

			// incorporate spatial score to favor merging nearby groups
			if (cluster_bounds)
				score *= 1.f + 0.4f * boundsScore(&cluster_bounds[id * 4], &cluster_bounds[other * 4]);

			if (score > best_score)
			{
				best_group = other;
				best_score = score;
			}
		}
	}

	return best_group;
}

} // namespace meshopt

size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_partition_size)
{
	using namespace meshopt;

	assert((vertex_positions == NULL || vertex_positions_stride >= 12) && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);
	assert(target_partition_size > 0);

	size_t max_partition_size = target_partition_size + target_partition_size * 3 / 8;

	meshopt_Allocator allocator;

	unsigned char* used = allocator.allocate<unsigned char>(vertex_count);
	memset(used, 0, vertex_count);

	unsigned int* cluster_newindices = allocator.allocate<unsigned int>(total_index_count);
	unsigned int* cluster_offsets = allocator.allocate<unsigned int>(cluster_count + 1);

	// make a new cluster index list that filters out duplicate indices
	filterClusterIndices(cluster_newindices, cluster_offsets, cluster_indices, cluster_index_counts, cluster_count, used, vertex_count, total_index_count);
	cluster_indices = cluster_newindices;

	// compute bounding sphere for each cluster if positions are provided
	float* cluster_bounds = NULL;

	if (vertex_positions)
	{
		cluster_bounds = allocator.allocate<float>(cluster_count * 4);
		computeClusterBounds(cluster_bounds, cluster_indices, cluster_offsets, cluster_count, vertex_positions, vertex_positions_stride);
	}

	// build cluster adjacency along with edge weights (shared vertex count)
	ClusterAdjacency adjacency = {};
	buildClusterAdjacency(adjacency, cluster_indices, cluster_offsets, cluster_count, vertex_count, allocator);

	ClusterGroup* groups = allocator.allocate<ClusterGroup>(cluster_count);

	GroupOrder* order = allocator.allocate<GroupOrder>(cluster_count);
	size_t pending = 0;

	// create a singleton group for each cluster and order them by priority
	for (size_t i = 0; i < cluster_count; ++i)
	{
		groups[i].group = int(i);
		groups[i].next = -1;
		groups[i].size = 1;
		groups[i].vertices = cluster_offsets[i + 1] - cluster_offsets[i];
		assert(groups[i].vertices > 0);

		GroupOrder item = {};
		item.id = unsigned(i);
		item.order = groups[i].vertices;

		heapPush(order, pending++, item);
	}

	// iteratively merge the smallest group with the best group
	while (pending)
	{
		GroupOrder top = heapPop(order, pending--);

		// this group was merged into another group earlier
		if (groups[top.id].size == 0)
			continue;

		// disassociate clusters from the group to prevent them from being merged again; we will re-associate them if the group is reinserted
		for (int i = top.id; i >= 0; i = groups[i].next)
		{
			assert(groups[i].group == int(top.id));
			groups[i].group = -1;
		}

		// the group is large enough, emit as is
		if (groups[top.id].size >= target_partition_size)
			continue;

		int best_group = pickGroupToMerge(groups, top.id, adjacency, max_partition_size, cluster_bounds);

		// we can't grow the group any more, emit as is
		if (best_group == -1)
			continue;

		// compute shared vertices to adjust the total vertices estimate after merging
		unsigned int shared = countShared(groups, top.id, best_group, adjacency);

		// combine groups by linking them together
		assert(groups[best_group].size > 0);

		for (int i = top.id; i >= 0; i = groups[i].next)
			if (groups[i].next < 0)
			{
				groups[i].next = best_group;
				break;
			}

		// update group sizes; note, the vertex update is an O(1) approximation which avoids recomputing the true size
		groups[top.id].size += groups[best_group].size;
		groups[top.id].vertices += groups[best_group].vertices;
		groups[top.id].vertices = (groups[top.id].vertices > shared) ? groups[top.id].vertices - shared : 1;

		groups[best_group].size = 0;
		groups[best_group].vertices = 0;

		// merge bounding spheres if bounds are available
		if (cluster_bounds)
		{
			mergeBounds(&cluster_bounds[top.id * 4], &cluster_bounds[best_group * 4]);
			memset(&cluster_bounds[best_group * 4], 0, 4 * sizeof(float));
		}

		// re-associate all clusters back to the merged group
		for (int i = top.id; i >= 0; i = groups[i].next)
			groups[i].group = int(top.id);

		top.order = groups[top.id].vertices;
		heapPush(order, pending++, top);
	}

	// renumber surviving group roots into contiguous partition ids and assign them to every cluster in each group
	size_t next_group = 0;

	for (size_t i = 0; i < cluster_count; ++i)
	{
		if (groups[i].size == 0)
			continue;

		for (int j = int(i); j >= 0; j = groups[j].next)
			destination[j] = unsigned(next_group);

		next_group++;
	}

	assert(next_group <= cluster_count);
	return next_group;
}
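
/* Illustrative usage sketch (an assumption about typical call-site code, not part of this file):
   `meshlet_indices` is assumed to hold all clusters' vertex index lists back to back, with
   `meshlet_index_counts[i]` giving the length of cluster i's list.

	std::vector<unsigned int> partition(cluster_count);

	size_t partition_count = meshopt_partitionClusters(
	    partition.data(),
	    meshlet_indices.data(), meshlet_indices.size(),
	    meshlet_index_counts.data(), cluster_count,
	    vertex_positions, vertex_count, sizeof(float) * 3, // positions may also be NULL to skip spatial scoring
	    8); // target number of clusters per partition

   Afterwards partition[i] is the partition id of cluster i; ids are contiguous in [0, partition_count). */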