GitHub Repository: godotengine/godot
Path: blob/master/core/templates/rid_owner.h
/**************************************************************************/
/*  rid_owner.h                                                           */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#pragma once

#include "core/os/memory.h"
#include "core/os/mutex.h"
#include "core/string/print_string.h"
#include "core/templates/local_vector.h"
#include "core/templates/rid.h"
#include "core/templates/safe_refcount.h"

#include <cstdio>
#include <typeinfo> // IWYU pragma: keep // Used in macro.

#ifdef SANITIZERS_ENABLED
#ifdef __has_feature
#if __has_feature(thread_sanitizer)
#define TSAN_ENABLED
#endif
#elif defined(__SANITIZE_THREAD__)
#define TSAN_ENABLED
#endif
#endif

#ifdef TSAN_ENABLED
#include <sanitizer/tsan_interface.h>
#endif

// The following macros would need real implementations on purely
// weakly ordered architectures. There's a test case
// ("[RID_Owner] Thread safety") with potential to catch issues
// on such architectures if these primitives fail to be implemented.
// For now, they are just markers for needs that may arise.
#define WEAK_MEMORY_ORDER 0
#if WEAK_MEMORY_ORDER
// Ideally, we'd have implementations that collaborate with the
// sync mechanism used (e.g., the mutex) so that, instead of issuing
// full memory barriers, acquire-release operations could be performed
// on the primitive itself. However, these implementations will at
// least provide correctness.
#define SYNC_ACQUIRE std::atomic_thread_fence(std::memory_order_acquire);
#define SYNC_RELEASE std::atomic_thread_fence(std::memory_order_release);
#else
// Compiler barriers are enough in this case.
#define SYNC_ACQUIRE std::atomic_signal_fence(std::memory_order_acquire);
#define SYNC_RELEASE std::atomic_signal_fence(std::memory_order_release);
#endif

class RID_AllocBase {
	static SafeNumeric<uint64_t> base_id;

protected:
	static RID _make_from_id(uint64_t p_id) {
		RID rid;
		rid._id = p_id;
		return rid;
	}

	static RID _gen_rid() {
		return _make_from_id(_gen_id());
	}

	friend struct VariantUtilityFunctions;

	static uint64_t _gen_id() {
		return base_id.increment();
	}

public:
	virtual ~RID_AllocBase() {}
};

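// RID_Alloc stores elements of type T in fixed-size chunks and hands out
// 64-bit RIDs that encode both a slot and a validator:
// - low 32 bits: index of the slot in the chunked storage below;
// - high 32 bits: a validator regenerated on every allocation, so a stale
//   RID pointing at a reused slot no longer matches and is rejected.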
template <typename T, bool THREAD_SAFE = false>
class RID_Alloc : public RID_AllocBase {
	struct Chunk {
		T data;
		uint32_t validator;
	};
	Chunk **chunks = nullptr;
	uint32_t **free_list_chunks = nullptr;

	uint32_t elements_in_chunk;
	uint32_t max_alloc = 0;
	uint32_t alloc_count = 0;
	uint32_t chunk_limit = 0;

	const char *description = nullptr;

	mutable Mutex mutex;

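	// Validator states for a slot (see _allocate_rid/get_or_null/free):
	// - 0xFFFFFFFF: the slot is free;
	// - high bit (0x80000000) set: allocated but not constructed yet;
	// - otherwise: the 31-bit validator of the live element.
	// free_list_chunks acts as a stack of free slot indices: allocation pops
	// the entry at position alloc_count, and free() pushes the index back.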
	_FORCE_INLINE_ RID _allocate_rid() {
		if constexpr (THREAD_SAFE) {
			mutex.lock();
		}

		if (alloc_count == max_alloc) {
			//allocate a new chunk
			uint32_t chunk_count = alloc_count == 0 ? 0 : (max_alloc / elements_in_chunk);
			if (THREAD_SAFE && chunk_count == chunk_limit) {
				mutex.unlock();
				if (description != nullptr) {
					ERR_FAIL_V_MSG(RID(), vformat("Element limit for RID of type '%s' reached.", String(description)));
				} else {
					ERR_FAIL_V_MSG(RID(), "Element limit reached.");
				}
			}

			//grow chunks
			if constexpr (!THREAD_SAFE) {
				chunks = (Chunk **)memrealloc(chunks, sizeof(Chunk *) * (chunk_count + 1));
			}
			chunks[chunk_count] = (Chunk *)memalloc(sizeof(Chunk) * elements_in_chunk); //but don't initialize
			//grow free lists
			if constexpr (!THREAD_SAFE) {
				free_list_chunks = (uint32_t **)memrealloc(free_list_chunks, sizeof(uint32_t *) * (chunk_count + 1));
			}
			free_list_chunks[chunk_count] = (uint32_t *)memalloc(sizeof(uint32_t) * elements_in_chunk);

			//initialize
			for (uint32_t i = 0; i < elements_in_chunk; i++) {
				// Don't initialize chunk.
				chunks[chunk_count][i].validator = 0xFFFFFFFF;
				free_list_chunks[chunk_count][i] = alloc_count + i;
			}

			if constexpr (THREAD_SAFE) {
				// Store atomically to avoid data race with the load in get_or_null().
				((std::atomic<uint32_t> *)&max_alloc)->store(max_alloc + elements_in_chunk, std::memory_order_relaxed);
			} else {
				max_alloc += elements_in_chunk;
			}
		}

		uint32_t free_index = free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk];

		uint32_t free_chunk = free_index / elements_in_chunk;
		uint32_t free_element = free_index % elements_in_chunk;

		uint32_t validator = 1 + (uint32_t)(_gen_id() % 0x7FFFFFFF);
		uint64_t id = validator;
		id <<= 32;
		id |= free_index;

		chunks[free_chunk][free_element].validator = validator;
		chunks[free_chunk][free_element].validator |= 0x80000000; //mark uninitialized bit

		alloc_count++;

		if constexpr (THREAD_SAFE) {
			mutex.unlock();
		}

		return _make_from_id(id);
	}

public:
	RID make_rid() {
		RID rid = _allocate_rid();
		initialize_rid(rid);
		return rid;
	}
	RID make_rid(const T &p_value) {
		RID rid = _allocate_rid();
		initialize_rid(rid, p_value);
		return rid;
	}

	//allocate but don't initialize, use initialize_rid afterwards
	RID allocate_rid() {
		return _allocate_rid();
	}

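	// Returns the element for p_rid, or nullptr if the RID is invalid.
	// With p_initialize == true (as used by initialize_rid()), the slot must
	// be allocated but not constructed yet; the call claims it by clearing
	// the uninitialized bit.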
	_FORCE_INLINE_ T *get_or_null(const RID &p_rid, bool p_initialize = false) {
		if (p_rid == RID()) {
			return nullptr;
		}

		if constexpr (THREAD_SAFE) {
			SYNC_ACQUIRE;
		}

		uint64_t id = p_rid.get_id();
		uint32_t idx = uint32_t(id & 0xFFFFFFFF);
		uint32_t ma;
		if constexpr (THREAD_SAFE) { // Read atomically to avoid data race with the store in _allocate_rid().
			ma = ((std::atomic<uint32_t> *)&max_alloc)->load(std::memory_order_relaxed);
		} else {
			ma = max_alloc;
		}
		if (unlikely(idx >= ma)) {
			return nullptr;
		}

		uint32_t idx_chunk = idx / elements_in_chunk;
		uint32_t idx_element = idx % elements_in_chunk;

		uint32_t validator = uint32_t(id >> 32);

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_acquire(&chunks[idx_chunk]); // We know not a race in practice.
			__tsan_acquire(&chunks[idx_chunk][idx_element]); // We know not a race in practice.
#endif
		}

		Chunk &c = chunks[idx_chunk][idx_element];

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_release(&chunks[idx_chunk]);
			__tsan_release(&chunks[idx_chunk][idx_element]);
			__tsan_acquire(&c.validator); // We know not a race in practice.
#endif
		}

		if (unlikely(p_initialize)) {
			if (unlikely(!(c.validator & 0x80000000))) {
				ERR_FAIL_V_MSG(nullptr, "Initializing already initialized RID");
			}

			if (unlikely((c.validator & 0x7FFFFFFF) != validator)) {
				ERR_FAIL_V_MSG(nullptr, "Attempting to initialize the wrong RID");
			}

			c.validator &= 0x7FFFFFFF; //initialized

		} else if (unlikely(c.validator != validator)) {
			if ((c.validator & 0x80000000) && c.validator != 0xFFFFFFFF) {
				ERR_FAIL_V_MSG(nullptr, "Attempting to use an uninitialized RID");
			}
			return nullptr;
		}

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_release(&c.validator);
#endif
		}

		T *ptr = &c.data;

		return ptr;
	}
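
	// The SYNC_RELEASE below pairs with the SYNC_ACQUIRE at the top of
	// get_or_null(), so in the THREAD_SAFE case a construction made visible
	// here is fully observed by later lookups.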
	void initialize_rid(RID p_rid) {
		T *mem = get_or_null(p_rid, true);
		ERR_FAIL_NULL(mem);

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_acquire(mem); // We know not a race in practice.
#endif
		}

		memnew_placement(mem, T);

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_release(mem);
#endif
			SYNC_RELEASE;
		}
	}

	void initialize_rid(RID p_rid, const T &p_value) {
		T *mem = get_or_null(p_rid, true);
		ERR_FAIL_NULL(mem);

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_acquire(mem); // We know not a race in practice.
#endif
		}

		memnew_placement(mem, T(p_value));

		if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
			__tsan_release(mem);
#endif
			SYNC_RELEASE;
		}
	}

	_FORCE_INLINE_ bool owns(const RID &p_rid) const {
		if constexpr (THREAD_SAFE) {
			mutex.lock();
		}

		uint64_t id = p_rid.get_id();
		uint32_t idx = uint32_t(id & 0xFFFFFFFF);
		if (unlikely(idx >= max_alloc)) {
			if constexpr (THREAD_SAFE) {
				mutex.unlock();
			}
			return false;
		}

		uint32_t idx_chunk = idx / elements_in_chunk;
		uint32_t idx_element = idx % elements_in_chunk;

		uint32_t validator = uint32_t(id >> 32);

		bool owned = (chunks[idx_chunk][idx_element].validator & 0x7FFFFFFF) == validator;

		if constexpr (THREAD_SAFE) {
			mutex.unlock();
		}

		return owned;
	}

	_FORCE_INLINE_ void free(const RID &p_rid) {
		if constexpr (THREAD_SAFE) {
			mutex.lock();
		}

		uint64_t id = p_rid.get_id();
		uint32_t idx = uint32_t(id & 0xFFFFFFFF);
		if (unlikely(idx >= max_alloc)) {
			if constexpr (THREAD_SAFE) {
				mutex.unlock();
			}
			ERR_FAIL();
		}

		uint32_t idx_chunk = idx / elements_in_chunk;
		uint32_t idx_element = idx % elements_in_chunk;

		uint32_t validator = uint32_t(id >> 32);
		if (unlikely(chunks[idx_chunk][idx_element].validator & 0x80000000)) {
			if constexpr (THREAD_SAFE) {
				mutex.unlock();
			}
			ERR_FAIL_MSG("Attempted to free an uninitialized or invalid RID");
		} else if (unlikely(chunks[idx_chunk][idx_element].validator != validator)) {
			if constexpr (THREAD_SAFE) {
				mutex.unlock();
			}
			ERR_FAIL();
		}

		chunks[idx_chunk][idx_element].data.~T();
		chunks[idx_chunk][idx_element].validator = 0xFFFFFFFF; // go invalid

		alloc_count--;
		free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk] = idx;

		if constexpr (THREAD_SAFE) {
			mutex.unlock();
		}
	}

	_FORCE_INLINE_ uint32_t get_rid_count() const {
		return alloc_count;
	}
	LocalVector<RID> get_owned_list() const {
		LocalVector<RID> owned;
		if constexpr (THREAD_SAFE) {
			mutex.lock();
		}
		for (size_t i = 0; i < max_alloc; i++) {
			uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
			if (validator != 0xFFFFFFFF) {
				owned.push_back(_make_from_id((validator << 32) | i));
			}
		}
		if constexpr (THREAD_SAFE) {
			mutex.unlock();
		}
		return owned;
	}

	//used for fast iteration over the elements or RIDs
	void fill_owned_buffer(RID *p_rid_buffer) const {
		if constexpr (THREAD_SAFE) {
			mutex.lock();
		}
		uint32_t idx = 0;
		for (size_t i = 0; i < max_alloc; i++) {
			uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
			if (validator != 0xFFFFFFFF) {
				p_rid_buffer[idx] = _make_from_id((validator << 32) | i);
				idx++;
			}
		}

		if constexpr (THREAD_SAFE) {
			mutex.unlock();
		}
	}

	void set_description(const char *p_description) {
		description = p_description;
	}

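	// In the THREAD_SAFE case, the chunk pointer arrays are preallocated up
	// front to chunk_limit, so they are never reallocated (and thus never
	// move) while get_or_null() reads them without holding the mutex.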
	RID_Alloc(uint32_t p_target_chunk_byte_size = 65536, uint32_t p_maximum_number_of_elements = 262144) {
		elements_in_chunk = sizeof(T) > p_target_chunk_byte_size ? 1 : (p_target_chunk_byte_size / sizeof(T));
		if constexpr (THREAD_SAFE) {
			chunk_limit = (p_maximum_number_of_elements / elements_in_chunk) + 1;
			chunks = (Chunk **)memalloc(sizeof(Chunk *) * chunk_limit);
			free_list_chunks = (uint32_t **)memalloc(sizeof(uint32_t *) * chunk_limit);
			SYNC_RELEASE;
		}
	}

	~RID_Alloc() {
		if constexpr (THREAD_SAFE) {
			SYNC_ACQUIRE;
		}

		if (alloc_count) {
			print_error(vformat("ERROR: %d RID allocations of type '%s' were leaked at exit.",
					alloc_count, description ? description : typeid(T).name()));

			for (size_t i = 0; i < max_alloc; i++) {
				uint32_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
				if (validator & 0x80000000) {
					continue; //uninitialized
				}
				if (validator != 0xFFFFFFFF) {
					chunks[i / elements_in_chunk][i % elements_in_chunk].data.~T();
				}
			}
		}

		uint32_t chunk_count = max_alloc / elements_in_chunk;
		for (uint32_t i = 0; i < chunk_count; i++) {
			memfree(chunks[i]);
			memfree(free_list_chunks[i]);
		}

		if (chunks) {
			memfree(chunks);
			memfree(free_list_chunks);
		}
	}
};

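// RID_PtrOwner stores one T* per RID (the pointed-to object is owned by the
// caller), whereas RID_Owner below stores the T itself inline in the
// chunked storage.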
template <typename T, bool THREAD_SAFE = false>
class RID_PtrOwner {
	RID_Alloc<T *, THREAD_SAFE> alloc;

public:
	_FORCE_INLINE_ RID make_rid(T *p_ptr) {
		return alloc.make_rid(p_ptr);
	}

	_FORCE_INLINE_ RID allocate_rid() {
		return alloc.allocate_rid();
	}

	_FORCE_INLINE_ void initialize_rid(RID p_rid, T *p_ptr) {
		alloc.initialize_rid(p_rid, p_ptr);
	}

	_FORCE_INLINE_ T *get_or_null(const RID &p_rid) {
		T **ptr = alloc.get_or_null(p_rid);
		if (unlikely(!ptr)) {
			return nullptr;
		}
		return *ptr;
	}

	_FORCE_INLINE_ void replace(const RID &p_rid, T *p_new_ptr) {
		T **ptr = alloc.get_or_null(p_rid);
		ERR_FAIL_NULL(ptr);
		*ptr = p_new_ptr;
	}

	_FORCE_INLINE_ bool owns(const RID &p_rid) const {
		return alloc.owns(p_rid);
	}

	_FORCE_INLINE_ void free(const RID &p_rid) {
		alloc.free(p_rid);
	}

	_FORCE_INLINE_ uint32_t get_rid_count() const {
		return alloc.get_rid_count();
	}

	_FORCE_INLINE_ LocalVector<RID> get_owned_list() const {
		return alloc.get_owned_list();
	}

	void fill_owned_buffer(RID *p_rid_buffer) const {
		alloc.fill_owned_buffer(p_rid_buffer);
	}

	void set_description(const char *p_description) {
		alloc.set_description(p_description);
	}

	RID_PtrOwner(uint32_t p_target_chunk_byte_size = 65536, uint32_t p_maximum_number_of_elements = 262144) :
			alloc(p_target_chunk_byte_size, p_maximum_number_of_elements) {}
};

template <typename T, bool THREAD_SAFE = false>
class RID_Owner {
	RID_Alloc<T, THREAD_SAFE> alloc;

public:
	_FORCE_INLINE_ RID make_rid() {
		return alloc.make_rid();
	}
	_FORCE_INLINE_ RID make_rid(const T &p_ptr) {
		return alloc.make_rid(p_ptr);
	}

	_FORCE_INLINE_ RID allocate_rid() {
		return alloc.allocate_rid();
	}

	_FORCE_INLINE_ void initialize_rid(RID p_rid) {
		alloc.initialize_rid(p_rid);
	}

	_FORCE_INLINE_ void initialize_rid(RID p_rid, const T &p_ptr) {
		alloc.initialize_rid(p_rid, p_ptr);
	}

	_FORCE_INLINE_ T *get_or_null(const RID &p_rid) {
		return alloc.get_or_null(p_rid);
	}

	_FORCE_INLINE_ bool owns(const RID &p_rid) const {
		return alloc.owns(p_rid);
	}

	_FORCE_INLINE_ void free(const RID &p_rid) {
		alloc.free(p_rid);
	}

	_FORCE_INLINE_ uint32_t get_rid_count() const {
		return alloc.get_rid_count();
	}

	_FORCE_INLINE_ LocalVector<RID> get_owned_list() const {
		return alloc.get_owned_list();
	}
	void fill_owned_buffer(RID *p_rid_buffer) const {
		alloc.fill_owned_buffer(p_rid_buffer);
	}

	void set_description(const char *p_description) {
		alloc.set_description(p_description);
	}
	RID_Owner(uint32_t p_target_chunk_byte_size = 65536, uint32_t p_maximum_number_of_elements = 262144) :
			alloc(p_target_chunk_byte_size, p_maximum_number_of_elements) {}
};
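
// Example usage (illustrative sketch only; `Mesh` is a hypothetical payload
// type, not something defined in this header):
//
//	RID_Owner<Mesh, true> mesh_owner; // true = thread-safe variant
//	mesh_owner.set_description("Mesh");
//
//	RID rid = mesh_owner.make_rid(Mesh()); // allocate + construct
//	Mesh *mesh = mesh_owner.get_or_null(rid);
//	if (mesh) {
//		// ...use *mesh...
//	}
//	mesh_owner.free(rid); // destruct and recycle the slot; `rid` is now stale
//	// After free(), get_or_null(rid) returns nullptr thanks to the validator.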