GitHub Repository: godotengine/godot
Path: blob/master/servers/rendering/rendering_device.cpp
/**************************************************************************/
/*  rendering_device.cpp                                                  */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#include "rendering_device.h"
#include "rendering_device.compat.inc"

#include "rendering_device_binds.h"
#include "shader_include_db.h"

#include "core/config/project_settings.h"
#include "core/io/dir_access.h"
#include "core/io/file_access.h"
#include "modules/modules_enabled.gen.h"
#include "servers/rendering/rendering_shader_container.h"

#ifdef MODULE_GLSLANG_ENABLED
#include "modules/glslang/shader_compile.h"
#endif

#define FORCE_SEPARATE_PRESENT_QUEUE 0
#define PRINT_FRAMEBUFFER_FORMAT 0

#define ERR_RENDER_THREAD_MSG String("This function (") + String(__func__) + String(") can only be called from the render thread. ")
#define ERR_RENDER_THREAD_GUARD() ERR_FAIL_COND_MSG(render_thread_id != Thread::get_caller_id(), ERR_RENDER_THREAD_MSG);
#define ERR_RENDER_THREAD_GUARD_V(m_ret) ERR_FAIL_COND_V_MSG(render_thread_id != Thread::get_caller_id(), (m_ret), ERR_RENDER_THREAD_MSG);
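
// Usage sketch for the guards above (illustrative; buffer_copy() further down
// in this file really does start this way):
//
//   Error RenderingDevice::buffer_copy(...) {
//       ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE); // Bail out with ERR_UNAVAILABLE off the render thread.
//       ...
//   }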

/**************************/
/**** HELPER FUNCTIONS ****/
/**************************/

static String _get_device_vendor_name(const RenderingContextDriver::Device &p_device) {
	switch (p_device.vendor) {
		case RenderingContextDriver::Vendor::VENDOR_AMD:
			return "AMD";
		case RenderingContextDriver::Vendor::VENDOR_IMGTEC:
			return "ImgTec";
		case RenderingContextDriver::Vendor::VENDOR_APPLE:
			return "Apple";
		case RenderingContextDriver::Vendor::VENDOR_NVIDIA:
			return "NVIDIA";
		case RenderingContextDriver::Vendor::VENDOR_ARM:
			return "ARM";
		case RenderingContextDriver::Vendor::VENDOR_MICROSOFT:
			return "Microsoft";
		case RenderingContextDriver::Vendor::VENDOR_QUALCOMM:
			return "Qualcomm";
		case RenderingContextDriver::Vendor::VENDOR_INTEL:
			return "Intel";
		default:
			return "Unknown";
	}
}

static String _get_device_type_name(const RenderingContextDriver::Device &p_device) {
	switch (p_device.type) {
		case RenderingContextDriver::DEVICE_TYPE_INTEGRATED_GPU:
			return "Integrated";
		case RenderingContextDriver::DEVICE_TYPE_DISCRETE_GPU:
			return "Discrete";
		case RenderingContextDriver::DEVICE_TYPE_VIRTUAL_GPU:
			return "Virtual";
		case RenderingContextDriver::DEVICE_TYPE_CPU:
			return "CPU";
		case RenderingContextDriver::DEVICE_TYPE_OTHER:
		default:
			return "Other";
	}
}

static uint32_t _get_device_type_score(const RenderingContextDriver::Device &p_device) {
	static const bool prefer_integrated = OS::get_singleton()->get_user_prefers_integrated_gpu();
	switch (p_device.type) {
		case RenderingContextDriver::DEVICE_TYPE_INTEGRATED_GPU:
			return prefer_integrated ? 5 : 4;
		case RenderingContextDriver::DEVICE_TYPE_DISCRETE_GPU:
			return prefer_integrated ? 4 : 5;
		case RenderingContextDriver::DEVICE_TYPE_VIRTUAL_GPU:
			return 3;
		case RenderingContextDriver::DEVICE_TYPE_CPU:
			return 2;
		case RenderingContextDriver::DEVICE_TYPE_OTHER:
		default:
			return 1;
	}
}
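
// Assumption: a higher score wins when a physical device has to be picked
// automatically (the selection logic itself is not part of this excerpt).
// Integrated and discrete GPUs swap ranks depending on the user's OS-level
// preference queried above.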

/**************************/
/**** RENDERING DEVICE ****/
/**************************/

// When true, the command graph will attempt to reorder the rendering commands submitted by the user based on the dependencies detected from
// the commands automatically. This should improve rendering performance in most scenarios at the cost of some extra CPU overhead.
//
// This behavior can be disabled if it's suspected that the graph is not detecting dependencies correctly and more control over the order of
// the commands is desired (e.g. debugging).

#define RENDER_GRAPH_REORDER 1

// Synchronization barriers are issued between the graph's levels only with the necessary amount of detail to achieve the correct result. If
// it's suspected that the graph is not doing this correctly, full barriers can be issued instead that will block all types of operations
// between the synchronization levels. This setting will have a very negative impact on performance when enabled, so it's only intended for
// debugging purposes.

#define RENDER_GRAPH_FULL_BARRIERS 0

// The command graph can automatically issue secondary command buffers and record them on background threads when they reach an arbitrary
// size threshold. This can be very beneficial towards reducing the time the main thread takes to record all the rendering commands. However,
// this setting is not enabled by default as it's been shown to cause some strange issues with certain IHVs that have yet to be understood.

#define SECONDARY_COMMAND_BUFFERS_PER_FRAME 0

RenderingDevice *RenderingDevice::singleton = nullptr;

RenderingDevice *RenderingDevice::get_singleton() {
	return singleton;
}

/***************************/
/**** ID INFRASTRUCTURE ****/
/***************************/

void RenderingDevice::_add_dependency(RID p_id, RID p_depends_on) {
	_THREAD_SAFE_METHOD_

	HashSet<RID> *set = dependency_map.getptr(p_depends_on);
	if (set == nullptr) {
		set = &dependency_map.insert(p_depends_on, HashSet<RID>())->value;
	}
	set->insert(p_id);

	set = reverse_dependency_map.getptr(p_id);
	if (set == nullptr) {
		set = &reverse_dependency_map.insert(p_id, HashSet<RID>())->value;
	}
	set->insert(p_depends_on);
}
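
// The two maps form a bidirectional dependency graph: dependency_map answers
// "who depends on this RID?" (so dependents can be freed along with it), while
// reverse_dependency_map answers "what does this RID depend on?" (so back
// references can be unlinked). _free_dependencies() below walks both sides.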

void RenderingDevice::_free_dependencies(RID p_id) {
	_THREAD_SAFE_METHOD_

	// Direct dependencies must be freed.

	HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id);
	if (E) {
		while (E->value.size()) {
			free_rid(*E->value.begin());
		}
		dependency_map.remove(E);
	}

	// Reverse dependencies must be unreferenced.
	E = reverse_dependency_map.find(p_id);

	if (E) {
		for (const RID &F : E->value) {
			HashMap<RID, HashSet<RID>>::Iterator G = dependency_map.find(F);
			ERR_CONTINUE(!G);
			ERR_CONTINUE(!G->value.has(p_id));
			G->value.erase(p_id);
		}

		reverse_dependency_map.remove(E);
	}
}

/*******************************/
/**** SHADER INFRASTRUCTURE ****/
/*******************************/

Vector<uint8_t> RenderingDevice::shader_compile_spirv_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language, String *r_error, bool p_allow_cache) {
	switch (p_language) {
#ifdef MODULE_GLSLANG_ENABLED
		case ShaderLanguage::SHADER_LANGUAGE_GLSL: {
			ShaderLanguageVersion language_version = driver->get_shader_container_format().get_shader_language_version();
			ShaderSpirvVersion spirv_version = driver->get_shader_container_format().get_shader_spirv_version();
			return compile_glslang_shader(p_stage, ShaderIncludeDB::parse_include_files(p_source_code), language_version, spirv_version, r_error);
		}
#endif
		default:
			ERR_FAIL_V_MSG(Vector<uint8_t>(), "Shader language is not supported.");
	}
}

RID RenderingDevice::shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
	Vector<uint8_t> bytecode = shader_compile_binary_from_spirv(p_spirv, p_shader_name);
	ERR_FAIL_COND_V(bytecode.is_empty(), RID());
	return shader_create_from_bytecode(bytecode);
}
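
// Illustrative path from GLSL source to a shader RID (a sketch; assumes the
// ShaderStageSPIRVData fields declared in rendering_device.h):
//
//   String error;
//   ShaderStageSPIRVData stage;
//   stage.shader_stage = SHADER_STAGE_COMPUTE;
//   stage.spirv = rd->shader_compile_spirv_from_source(SHADER_STAGE_COMPUTE, source, SHADER_LANGUAGE_GLSL, &error);
//   RID shader = rd->shader_create_from_spirv({ stage });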

/***************************/
/**** BUFFER MANAGEMENT ****/
/***************************/

RenderingDevice::Buffer *RenderingDevice::_get_buffer_from_owner(RID p_buffer) {
	Buffer *buffer = nullptr;
	if (vertex_buffer_owner.owns(p_buffer)) {
		buffer = vertex_buffer_owner.get_or_null(p_buffer);
	} else if (index_buffer_owner.owns(p_buffer)) {
		buffer = index_buffer_owner.get_or_null(p_buffer);
	} else if (uniform_buffer_owner.owns(p_buffer)) {
		buffer = uniform_buffer_owner.get_or_null(p_buffer);
	} else if (texture_buffer_owner.owns(p_buffer)) {
		DEV_ASSERT(false && "FIXME: Broken.");
		//buffer = texture_buffer_owner.get_or_null(p_buffer)->buffer;
	} else if (storage_buffer_owner.owns(p_buffer)) {
		buffer = storage_buffer_owner.get_or_null(p_buffer);
	}
	return buffer;
}

Error RenderingDevice::_buffer_initialize(Buffer *p_buffer, Span<uint8_t> p_data, uint32_t p_required_align) {
	uint32_t transfer_worker_offset;
	TransferWorker *transfer_worker = _acquire_transfer_worker(p_data.size(), p_required_align, transfer_worker_offset);
	p_buffer->transfer_worker_index = transfer_worker->index;

	{
		MutexLock lock(transfer_worker->operations_mutex);
		p_buffer->transfer_worker_operation = ++transfer_worker->operations_counter;
	}

	// Copy to the worker's staging buffer.
	uint8_t *data_ptr = driver->buffer_map(transfer_worker->staging_buffer);
	ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);

	memcpy(data_ptr + transfer_worker_offset, p_data.ptr(), p_data.size());
	driver->buffer_unmap(transfer_worker->staging_buffer);

	// Copy from the staging buffer to the real buffer.
	RDD::BufferCopyRegion region;
	region.src_offset = transfer_worker_offset;
	region.dst_offset = 0;
	region.size = p_data.size();
	driver->command_copy_buffer(transfer_worker->command_buffer, transfer_worker->staging_buffer, p_buffer->driver_id, region);

	_release_transfer_worker(transfer_worker);

	return OK;
}

Error RenderingDevice::_insert_staging_block(StagingBuffers &p_staging_buffers) {
	StagingBufferBlock block;

	block.driver_id = driver->buffer_create(p_staging_buffers.block_size, p_staging_buffers.usage_bits, RDD::MEMORY_ALLOCATION_TYPE_CPU);
	ERR_FAIL_COND_V(!block.driver_id, ERR_CANT_CREATE);

	block.frame_used = 0;
	block.fill_amount = 0;

	p_staging_buffers.blocks.insert(p_staging_buffers.current, block);
	return OK;
}
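
// The allocator below treats the block list as a ring buffer: the current
// block is bump-allocated (aligned to p_required_align) while it belongs to
// this frame; otherwise it advances to the next block, reuses blocks whose
// frame is old enough to have been fully processed, grows the pool up to
// max_size, and as a last resort reports a stall/flush action for the caller
// to execute before retrying.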

Error RenderingDevice::_staging_buffer_allocate(StagingBuffers &p_staging_buffers, uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, StagingRequiredAction &r_required_action, bool p_can_segment) {
	// Determine a block to use.

	r_alloc_size = p_amount;
	r_required_action = STAGING_REQUIRED_ACTION_NONE;

	while (true) {
		r_alloc_offset = 0;

		// See if we can use the current block.
		if (p_staging_buffers.blocks[p_staging_buffers.current].frame_used == frames_drawn) {
			// We used this block this frame, let's see if there is still room.

			uint32_t write_from = p_staging_buffers.blocks[p_staging_buffers.current].fill_amount;

			{
				uint32_t align_remainder = write_from % p_required_align;
				if (align_remainder != 0) {
					write_from += p_required_align - align_remainder;
				}
			}

			int32_t available_bytes = int32_t(p_staging_buffers.block_size) - int32_t(write_from);

			if ((int32_t)p_amount < available_bytes) {
				// All is good, everything will fit.
				r_alloc_offset = write_from;
			} else if (p_can_segment && available_bytes >= (int32_t)p_required_align) {
				// Not all of it will fit, but at least we can fit a chunk.
				// Update what needs to be written to.
				r_alloc_offset = write_from;
				r_alloc_size = available_bytes - (available_bytes % p_required_align);

			} else {
				// Can't fit it into this buffer.
				// Will need to try the next buffer.

				p_staging_buffers.current = (p_staging_buffers.current + 1) % p_staging_buffers.blocks.size();

				// Before doing anything, though, let's check that we didn't manage to fill all blocks.
				// Possible in a single frame.
				if (p_staging_buffers.blocks[p_staging_buffers.current].frame_used == frames_drawn) {
					// Guess we did... ok, let's see if we can insert a new block.
					if ((uint64_t)p_staging_buffers.blocks.size() * p_staging_buffers.block_size < p_staging_buffers.max_size) {
						// We can, so we are safe.
						Error err = _insert_staging_block(p_staging_buffers);
						if (err) {
							return err;
						}
						// Claim it for this frame.
						p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
					} else {
						// Worst case scenario: all the staging buffers belong to this frame
						// and this frame is not even done yet.
						// If this is the main thread, it means the user is likely loading a lot of resources at once.
						// Otherwise, the thread should just be blocked until the next frame (currently unimplemented).
						r_required_action = STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL;
					}

				} else {
					// Not from the current frame, so continue and try again.
					continue;
				}
			}

		} else if (p_staging_buffers.blocks[p_staging_buffers.current].frame_used <= frames_drawn - frames.size()) {
			// This is an old block, which was already processed, let's reuse it.
			p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
			p_staging_buffers.blocks.write[p_staging_buffers.current].fill_amount = 0;
		} else {
			// This block may still be in use, so let's not touch it unless we have to. Can we create a new one?
			if ((uint64_t)p_staging_buffers.blocks.size() * p_staging_buffers.block_size < p_staging_buffers.max_size) {
				// We are still allowed to create a new block, so let's do that and insert it at the current position.
				Error err = _insert_staging_block(p_staging_buffers);
				if (err) {
					return err;
				}
				// Claim it for this frame.
				p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
			} else {
				// We are out of room and we can't create more.
				// Let's flush older frames.
				// The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway.
				// If loading from a separate thread, we can block that thread until the next frame when more room is made (not currently implemented, though).
				r_required_action = STAGING_REQUIRED_ACTION_STALL_PREVIOUS;
			}
		}

		// All was good, break.
		break;
	}

	p_staging_buffers.used = true;

	return OK;
}

void RenderingDevice::_staging_buffer_execute_required_action(StagingBuffers &p_staging_buffers, StagingRequiredAction p_required_action) {
	switch (p_required_action) {
		case STAGING_REQUIRED_ACTION_NONE: {
			// Do nothing.
		} break;
		case STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL: {
			_flush_and_stall_for_all_frames();

			// Clear the whole staging buffer.
			for (int i = 0; i < p_staging_buffers.blocks.size(); i++) {
				p_staging_buffers.blocks.write[i].frame_used = 0;
				p_staging_buffers.blocks.write[i].fill_amount = 0;
			}

			// Claim for current frame.
			p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
		} break;
		case STAGING_REQUIRED_ACTION_STALL_PREVIOUS: {
			_stall_for_previous_frames();

			for (int i = 0; i < p_staging_buffers.blocks.size(); i++) {
				// Clear all blocks but the ones from this frame.
				int block_idx = (i + p_staging_buffers.current) % p_staging_buffers.blocks.size();
				if (p_staging_buffers.blocks[block_idx].frame_used == frames_drawn) {
					break; // Ok, we reached something from this frame, abort.
				}

				p_staging_buffers.blocks.write[block_idx].frame_used = 0;
				p_staging_buffers.blocks.write[block_idx].fill_amount = 0;
			}

			// Claim for current frame.
			p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
		} break;
		default: {
			DEV_ASSERT(false && "Unknown required action.");
		} break;
	}
}

Error RenderingDevice::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG(draw_list.active, ERR_INVALID_PARAMETER,
			"Copying buffers is forbidden during creation of a draw list");
	ERR_FAIL_COND_V_MSG(compute_list.active, ERR_INVALID_PARAMETER,
			"Copying buffers is forbidden during creation of a compute list");

	Buffer *src_buffer = _get_buffer_from_owner(p_src_buffer);
	if (!src_buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Source buffer argument is not a valid buffer of any type.");
	}

	Buffer *dst_buffer = _get_buffer_from_owner(p_dst_buffer);
	if (!dst_buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Destination buffer argument is not a valid buffer of any type.");
	}

	// Validate the copy's dimensions for both buffers.
	ERR_FAIL_COND_V_MSG((p_size + p_src_offset) > src_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the source buffer.");
	ERR_FAIL_COND_V_MSG((p_size + p_dst_offset) > dst_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the destination buffer.");

	_check_transfer_worker_buffer(src_buffer);
	_check_transfer_worker_buffer(dst_buffer);

	// Perform the copy.
	RDD::BufferCopyRegion region;
	region.src_offset = p_src_offset;
	region.dst_offset = p_dst_offset;
	region.size = p_size;

	if (_buffer_make_mutable(dst_buffer, p_dst_buffer)) {
		// The destination buffer must be mutable to be used as a copy destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_buffer_copy(src_buffer->driver_id, src_buffer->draw_tracker, dst_buffer->driver_id, dst_buffer->draw_tracker, region);

	return OK;
}
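
// Note that the copy is only recorded into the draw graph at this point; it is
// executed when the graph is processed later in the frame, not synchronously
// at call time.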

Error RenderingDevice::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	copy_bytes_count += p_size;
	ERR_FAIL_COND_V_MSG(draw_list.active, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a draw list");
	ERR_FAIL_COND_V_MSG(compute_list.active, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a compute list");

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	ERR_FAIL_NULL_V_MSG(buffer, ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
	ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER, "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

	_check_transfer_worker_buffer(buffer);

	// Submitting may get chunked for various reasons, so convert this to a task.
	size_t to_submit = p_size;
	size_t submit_from = 0;

	thread_local LocalVector<RDG::RecordedBufferCopy> command_buffer_copies_vector;
	command_buffer_copies_vector.clear();

	const uint8_t *src_data = reinterpret_cast<const uint8_t *>(p_data);
	const uint32_t required_align = 32;
	while (to_submit > 0) {
		uint32_t block_write_offset;
		uint32_t block_write_amount;
		StagingRequiredAction required_action;

		Error err = _staging_buffer_allocate(upload_staging_buffers, MIN(to_submit, upload_staging_buffers.block_size), required_align, block_write_offset, block_write_amount, required_action);
		if (err) {
			return err;
		}

		if (!command_buffer_copies_vector.is_empty() && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL) {
			if (_buffer_make_mutable(buffer, p_buffer)) {
				// The buffer must be mutable to be used as a copy destination.
				draw_graph.add_synchronization();
			}

			draw_graph.add_buffer_update(buffer->driver_id, buffer->draw_tracker, command_buffer_copies_vector);
			command_buffer_copies_vector.clear();
		}

		_staging_buffer_execute_required_action(upload_staging_buffers, required_action);

		// Map the staging buffer (it's CPU-visible and coherent).
		uint8_t *data_ptr = driver->buffer_map(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);
		ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);

		// Copy to the staging buffer.
		memcpy(data_ptr + block_write_offset, src_data + submit_from, block_write_amount);

		// Unmap.
		driver->buffer_unmap(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);

		// Insert a command to copy this.
		RDD::BufferCopyRegion region;
		region.src_offset = block_write_offset;
		region.dst_offset = submit_from + p_offset;
		region.size = block_write_amount;

		RDG::RecordedBufferCopy buffer_copy;
		buffer_copy.source = upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id;
		buffer_copy.region = region;
		command_buffer_copies_vector.push_back(buffer_copy);

		upload_staging_buffers.blocks.write[upload_staging_buffers.current].fill_amount = block_write_offset + block_write_amount;

		to_submit -= block_write_amount;
		submit_from += block_write_amount;
	}

	if (!command_buffer_copies_vector.is_empty()) {
		if (_buffer_make_mutable(buffer, p_buffer)) {
			// The buffer must be mutable to be used as a copy destination.
			draw_graph.add_synchronization();
		}

		draw_graph.add_buffer_update(buffer->driver_id, buffer->draw_tracker, command_buffer_copies_vector);
	}

	gpu_copy_count++;

	return OK;
}

Error RenderingDevice::driver_callback_add(RDD::DriverCallback p_callback, void *p_userdata, VectorView<CallbackResource> p_resources) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG(draw_list.active, ERR_INVALID_PARAMETER,
			"Driver callback is forbidden during creation of a draw list");
	ERR_FAIL_COND_V_MSG(compute_list.active, ERR_INVALID_PARAMETER,
			"Driver callback is forbidden during creation of a compute list");

	thread_local LocalVector<RDG::ResourceTracker *> trackers;
	thread_local LocalVector<RDG::ResourceUsage> usages;

	uint32_t resource_count = p_resources.size();
	trackers.resize(resource_count);
	usages.resize(resource_count);

	if (resource_count > 0) {
		for (uint32_t i = 0; i < p_resources.size(); i++) {
			const CallbackResource &cr = p_resources[i];
			switch (cr.type) {
				case CALLBACK_RESOURCE_TYPE_BUFFER: {
					Buffer *buffer = _get_buffer_from_owner(cr.rid);
					if (!buffer) {
						ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, vformat("Argument %d is not a valid buffer of any type.", i));
					}
					if (_buffer_make_mutable(buffer, cr.rid)) {
						draw_graph.add_synchronization();
					}
					trackers[i] = buffer->draw_tracker;
					usages[i] = (RDG::ResourceUsage)cr.usage;
				} break;
				case CALLBACK_RESOURCE_TYPE_TEXTURE: {
					Texture *texture = texture_owner.get_or_null(cr.rid);
					if (!texture) {
						ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, vformat("Argument %d is not a valid texture.", i));
					}
					if (_texture_make_mutable(texture, cr.rid)) {
						draw_graph.add_synchronization();
					}
					trackers[i] = texture->draw_tracker;
					usages[i] = (RDG::ResourceUsage)cr.usage;
				} break;
				default: {
					CRASH_NOW_MSG("Invalid callback resource type.");
				} break;
			}
		}
	}

	draw_graph.add_driver_callback(p_callback, p_userdata, trackers, usages);

	return OK;
}

String RenderingDevice::get_perf_report() const {
	String perf_report_text;
	perf_report_text += " gpu:" + String::num_int64(prev_gpu_copy_count);
	perf_report_text += " bytes:" + String::num_int64(prev_copy_bytes_count);

	perf_report_text += " lazily alloc:" + String::num_int64(driver->get_lazily_memory_used());
	return perf_report_text;
}

void RenderingDevice::update_perf_report() {
	prev_gpu_copy_count = gpu_copy_count;
	prev_copy_bytes_count = copy_bytes_count;
	gpu_copy_count = 0;
	copy_bytes_count = 0;
}

Error RenderingDevice::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
			"Size must be a multiple of four");
	ERR_FAIL_COND_V_MSG(draw_list.active, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a draw list");
	ERR_FAIL_COND_V_MSG(compute_list.active, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a compute list");

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
	}

	ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
			"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

	_check_transfer_worker_buffer(buffer);

	if (_buffer_make_mutable(buffer, p_buffer)) {
		// The destination buffer must be mutable to be used as a clear destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_buffer_clear(buffer->driver_id, buffer->draw_tracker, p_offset, p_size);

	return OK;
}

Vector<uint8_t> RenderingDevice::buffer_get_data(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(Vector<uint8_t>());

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved.");
	}

	// Size of the buffer to retrieve.
	if (!p_size) {
		p_size = buffer->size;
	} else {
		ERR_FAIL_COND_V_MSG(p_size + p_offset > buffer->size, Vector<uint8_t>(),
				"Size is larger than the buffer.");
	}

	_check_transfer_worker_buffer(buffer);

	RDD::BufferID tmp_buffer = driver->buffer_create(buffer->size, RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
	ERR_FAIL_COND_V(!tmp_buffer, Vector<uint8_t>());

	RDD::BufferCopyRegion region;
	region.src_offset = p_offset;
	region.size = p_size;

	draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, tmp_buffer, region);

	// Flush everything so memory can be safely mapped.
	_flush_and_stall_for_all_frames();

	uint8_t *buffer_mem = driver->buffer_map(tmp_buffer);
	ERR_FAIL_NULL_V(buffer_mem, Vector<uint8_t>());

	Vector<uint8_t> buffer_data;
	{
		buffer_data.resize(p_size);
		uint8_t *w = buffer_data.ptrw();
		memcpy(w, buffer_mem, p_size);
	}

	driver->buffer_unmap(tmp_buffer);

	driver->buffer_free(tmp_buffer);

	return buffer_data;
}
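
// buffer_get_data() above is synchronous: it flushes and stalls all frames so
// the staging copy can be mapped immediately. The variant below avoids the
// stall by recording the readback into this frame's download staging blocks;
// the callback is presumably invoked once that frame has been processed
// (handled elsewhere in this file).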

Error RenderingDevice::buffer_get_data_async(RID p_buffer, const Callable &p_callback, uint32_t p_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (buffer == nullptr) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer is either invalid or this type of buffer can't be retrieved.");
	}

	if (p_size == 0) {
		p_size = buffer->size;
	}

	ERR_FAIL_COND_V_MSG(p_size + p_offset > buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the buffer.");
	ERR_FAIL_COND_V_MSG(!p_callback.is_valid(), ERR_INVALID_PARAMETER, "Callback must be valid.");

	_check_transfer_worker_buffer(buffer);

	BufferGetDataRequest get_data_request;
	get_data_request.callback = p_callback;
	get_data_request.frame_local_index = frames[frame].download_buffer_copy_regions.size();
	get_data_request.size = p_size;

	const uint32_t required_align = 32;
	uint32_t block_write_offset;
	uint32_t block_write_amount;
	StagingRequiredAction required_action;
	uint32_t to_submit = p_size;
	uint32_t submit_from = 0;
	while (to_submit > 0) {
		Error err = _staging_buffer_allocate(download_staging_buffers, MIN(to_submit, download_staging_buffers.block_size), required_align, block_write_offset, block_write_amount, required_action);
		if (err) {
			return err;
		}

		const bool flush_frames = (get_data_request.frame_local_count > 0) && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL;
		if (flush_frames) {
			if (_buffer_make_mutable(buffer, p_buffer)) {
				// The buffer must be mutable to be used as a copy source.
				draw_graph.add_synchronization();
			}

			for (uint32_t i = 0; i < get_data_request.frame_local_count; i++) {
				uint32_t local_index = get_data_request.frame_local_index + i;
				draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, frames[frame].download_buffer_staging_buffers[local_index], frames[frame].download_buffer_copy_regions[local_index]);
			}
		}

		_staging_buffer_execute_required_action(download_staging_buffers, required_action);

		if (flush_frames) {
			get_data_request.frame_local_count = 0;
			get_data_request.frame_local_index = frames[frame].download_buffer_copy_regions.size();
		}

		RDD::BufferCopyRegion region;
		region.src_offset = submit_from + p_offset;
		region.dst_offset = block_write_offset;
		region.size = block_write_amount;

		frames[frame].download_buffer_staging_buffers.push_back(download_staging_buffers.blocks[download_staging_buffers.current].driver_id);
		frames[frame].download_buffer_copy_regions.push_back(region);
		get_data_request.frame_local_count++;

		download_staging_buffers.blocks.write[download_staging_buffers.current].fill_amount = block_write_offset + block_write_amount;

		to_submit -= block_write_amount;
		submit_from += block_write_amount;
	}

	if (get_data_request.frame_local_count > 0) {
		if (_buffer_make_mutable(buffer, p_buffer)) {
			// The buffer must be mutable to be used as a copy source.
			draw_graph.add_synchronization();
		}

		for (uint32_t i = 0; i < get_data_request.frame_local_count; i++) {
			uint32_t local_index = get_data_request.frame_local_index + i;
			draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, frames[frame].download_buffer_staging_buffers[local_index], frames[frame].download_buffer_copy_regions[local_index]);
		}

		frames[frame].download_buffer_get_data_requests.push_back(get_data_request);
	}

	return OK;
}

uint64_t RenderingDevice::buffer_get_device_address(RID p_buffer) {
	ERR_RENDER_THREAD_GUARD_V(0);

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	ERR_FAIL_NULL_V_MSG(buffer, 0, "Buffer argument is not a valid buffer of any type.");
	ERR_FAIL_COND_V_MSG(!buffer->usage.has_flag(RDD::BUFFER_USAGE_DEVICE_ADDRESS_BIT), 0, "Buffer was not created with device address flag.");

	return driver->buffer_get_device_address(buffer->driver_id);
}

RID RenderingDevice::storage_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data, BitField<StorageBufferUsage> p_usage, BitField<BufferCreationBits> p_creation_bits) {
	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	buffer.size = p_size_bytes;
	buffer.usage = (RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_STORAGE_BIT);
	if (p_usage.has_flag(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT)) {
		buffer.usage.set_flag(RDD::BUFFER_USAGE_INDIRECT_BIT);
	}
	if (p_creation_bits.has_flag(BUFFER_CREATION_DEVICE_ADDRESS_BIT)) {
#ifdef DEBUG_ENABLED
		ERR_FAIL_COND_V_MSG(!has_feature(SUPPORTS_BUFFER_DEVICE_ADDRESS), RID(),
				"The GPU doesn't support the buffer device address flag.");
#endif

		buffer.usage.set_flag(RDD::BUFFER_USAGE_DEVICE_ADDRESS_BIT);
	}
	buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!buffer.driver_id, RID());

	// Storage buffers are assumed to be mutable.
	buffer.draw_tracker = RDG::resource_tracker_create();
	buffer.draw_tracker->buffer_driver_id = buffer.driver_id;

	if (p_data.size()) {
		_buffer_initialize(&buffer, p_data);
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = storage_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}
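
// Illustrative call (a sketch; "data" stands for a caller-prepared
// Span<uint8_t> whose size matches the buffer):
//
//   RID ssbo = rd->storage_buffer_create(data.size(), data);
//
// Initial data routes through _buffer_initialize(), i.e. it is uploaded via a
// transfer worker's staging buffer rather than by mapping the GPU allocation.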

RID RenderingDevice::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, Span<uint8_t> p_data) {
	uint32_t element_size = get_format_vertex_size(p_format);
	ERR_FAIL_COND_V_MSG(element_size == 0, RID(), "Format requested is not supported for texture buffers");
	uint64_t size_bytes = uint64_t(element_size) * p_size_elements;

	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != size_bytes, RID());

	Buffer texture_buffer;
	texture_buffer.size = size_bytes;
	BitField<RDD::BufferUsageBits> usage = (RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_TEXEL_BIT);
	texture_buffer.driver_id = driver->buffer_create(size_bytes, usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!texture_buffer.driver_id, RID());

	// Texture buffers are assumed to be immutable unless they don't have initial data.
	if (p_data.is_empty()) {
		texture_buffer.draw_tracker = RDG::resource_tracker_create();
		texture_buffer.draw_tracker->buffer_driver_id = texture_buffer.driver_id;
	}

	bool ok = driver->buffer_set_texel_format(texture_buffer.driver_id, p_format);
	if (!ok) {
		driver->buffer_free(texture_buffer.driver_id);
		ERR_FAIL_V(RID());
	}

	if (p_data.size()) {
		_buffer_initialize(&texture_buffer, p_data);
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += size_bytes;
	_THREAD_SAFE_UNLOCK_

	RID id = texture_buffer_owner.make_rid(texture_buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

/*****************/
/**** TEXTURE ****/
/*****************/

RID RenderingDevice::texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t>> &p_data) {
	// Some adjustments will happen.
	TextureFormat format = p_format;

	if (format.shareable_formats.size()) {
		ERR_FAIL_COND_V_MSG(!format.shareable_formats.has(format.format), RID(),
				"If supplied a list of shareable formats, the current format must be present in the list");
		ERR_FAIL_COND_V_MSG(p_view.format_override != DATA_FORMAT_MAX && !format.shareable_formats.has(p_view.format_override), RID(),
				"If supplied a list of shareable formats, the current view format override must be present in the list");
	}

	ERR_FAIL_INDEX_V(format.texture_type, RDD::TEXTURE_TYPE_MAX, RID());

	ERR_FAIL_COND_V_MSG(format.width < 1, RID(), "Width must be equal or greater than 1 for all textures");

	if (format.texture_type != TEXTURE_TYPE_1D && format.texture_type != TEXTURE_TYPE_1D_ARRAY) {
		ERR_FAIL_COND_V_MSG(format.height < 1, RID(), "Height must be equal or greater than 1 for 2D and 3D textures");
	}

	if (format.texture_type == TEXTURE_TYPE_3D) {
		ERR_FAIL_COND_V_MSG(format.depth < 1, RID(), "Depth must be equal or greater than 1 for 3D textures");
	}

	ERR_FAIL_COND_V(format.mipmaps < 1, RID());

	if (format.texture_type == TEXTURE_TYPE_1D_ARRAY || format.texture_type == TEXTURE_TYPE_2D_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE) {
		ERR_FAIL_COND_V_MSG(format.array_layers < 1, RID(),
				"Number of layers must be equal or greater than 1 for arrays and cubemaps.");
		ERR_FAIL_COND_V_MSG((format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE) && (format.array_layers % 6) != 0, RID(),
				"Cubemap and cubemap array textures must provide a layer number that is a multiple of 6");
		ERR_FAIL_COND_V_MSG(((format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE)) && (format.width != format.height), RID(),
				"Cubemap and cubemap array textures must have equal width and height.");
		ERR_FAIL_COND_V_MSG(format.array_layers > driver->limit_get(LIMIT_MAX_TEXTURE_ARRAY_LAYERS), RID(), "Number of layers exceeds device maximum.");
	} else {
		format.array_layers = 1;
	}

	ERR_FAIL_INDEX_V(format.samples, TEXTURE_SAMPLES_MAX, RID());

	ERR_FAIL_COND_V_MSG(format.usage_bits == 0, RID(), "No usage bits specified (at least one is needed)");

	format.height = format.texture_type != TEXTURE_TYPE_1D && format.texture_type != TEXTURE_TYPE_1D_ARRAY ? format.height : 1;
	format.depth = format.texture_type == TEXTURE_TYPE_3D ? format.depth : 1;

	uint64_t size_max = 0;
	switch (format.texture_type) {
		case TEXTURE_TYPE_1D:
		case TEXTURE_TYPE_1D_ARRAY:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_1D);
			break;
		case TEXTURE_TYPE_2D:
		case TEXTURE_TYPE_2D_ARRAY:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_2D);
			break;
		case TEXTURE_TYPE_CUBE:
		case TEXTURE_TYPE_CUBE_ARRAY:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_CUBE);
			break;
		case TEXTURE_TYPE_3D:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_3D);
			break;
		case TEXTURE_TYPE_MAX:
			break;
	}
	ERR_FAIL_COND_V_MSG(format.width > size_max || format.height > size_max || format.depth > size_max, RID(), "Texture dimensions exceed device maximum.");

	uint32_t required_mipmaps = get_image_required_mipmaps(format.width, format.height, format.depth);

	ERR_FAIL_COND_V_MSG(required_mipmaps < format.mipmaps, RID(),
			"Too many mipmaps requested for texture format and dimensions (" + itos(format.mipmaps) + "), maximum allowed: (" + itos(required_mipmaps) + ").");

	Vector<Vector<uint8_t>> data = p_data;
	bool immediate_flush = false;

	// If this is a VRS texture, we make sure that it is created with valid initial data. This prevents a crash on Qualcomm Snapdragon XR2 Gen 1
	// (used in Quest 2, Quest Pro, Pico 4, HTC Vive XR Elite and others) where the driver will read the texture before we've had time to finish updating it.
	if (data.is_empty() && (p_format.usage_bits & TEXTURE_USAGE_VRS_ATTACHMENT_BIT)) {
		immediate_flush = true;
		for (uint32_t i = 0; i < format.array_layers; i++) {
			uint32_t required_size = get_image_format_required_size(format.format, format.width, format.height, format.depth, format.mipmaps);
			Vector<uint8_t> layer;
			layer.resize(required_size);
			layer.fill(255);
			data.push_back(layer);
		}
	}

	uint32_t forced_usage_bits = _texture_vrs_method_to_usage_bits();
	if (data.size()) {
		ERR_FAIL_COND_V_MSG(data.size() != (int)format.array_layers, RID(),
				"Default supplied data for image format is of invalid length (" + itos(data.size()) + "), should be (" + itos(format.array_layers) + ").");

		for (uint32_t i = 0; i < format.array_layers; i++) {
			uint32_t required_size = get_image_format_required_size(format.format, format.width, format.height, format.depth, format.mipmaps);
			ERR_FAIL_COND_V_MSG((uint32_t)data[i].size() != required_size, RID(),
					"Data for slice index " + itos(i) + " (mapped to layer " + itos(i) + ") differs in size (supplied: " + itos(data[i].size()) + ") from what is required by the format (" + itos(required_size) + ").");
		}

		ERR_FAIL_COND_V_MSG(format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, RID(),
				"Textures created as depth attachments can't be initialized with data directly. Use RenderingDevice::texture_update() instead.");

		if (!(format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT)) {
			forced_usage_bits |= TEXTURE_USAGE_CAN_UPDATE_BIT;
		}
	}

	{
		// Validate that this image is supported for the intended use.
		bool cpu_readable = (format.usage_bits & RDD::TEXTURE_USAGE_CPU_READ_BIT);
		BitField<RDD::TextureUsageBits> supported_usage = driver->texture_get_usages_supported_by_format(format.format, cpu_readable);

		String format_text = "'" + String(FORMAT_NAMES[format.format]) + "'";

		if ((format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_SAMPLING_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as sampling texture.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as color attachment.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as depth-stencil attachment.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_STORAGE_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as storage image.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_STORAGE_ATOMIC_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_STORAGE_ATOMIC_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as atomic storage image.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_VRS_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as variable shading rate attachment.");
		}
	}

	// Transfer and validate view info.

	RDD::TextureView tv;
	if (p_view.format_override == DATA_FORMAT_MAX) {
		tv.format = format.format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
		tv.format = p_view.format_override;
	}
	ERR_FAIL_INDEX_V(p_view.swizzle_r, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_g, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_b, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_a, TEXTURE_SWIZZLE_MAX, RID());
	tv.swizzle_r = p_view.swizzle_r;
	tv.swizzle_g = p_view.swizzle_g;
	tv.swizzle_b = p_view.swizzle_b;
	tv.swizzle_a = p_view.swizzle_a;

	// Create.

	Texture texture;
	format.usage_bits |= forced_usage_bits;
	texture.driver_id = driver->texture_create(format, tv);
	ERR_FAIL_COND_V(!texture.driver_id, RID());
	texture.type = format.texture_type;
	texture.format = format.format;
	texture.width = format.width;
	texture.height = format.height;
	texture.depth = format.depth;
	texture.layers = format.array_layers;
	texture.mipmaps = format.mipmaps;
	texture.base_mipmap = 0;
	texture.base_layer = 0;
	texture.is_resolve_buffer = format.is_resolve_buffer;
	texture.is_discardable = format.is_discardable;
	texture.usage_flags = format.usage_bits & ~forced_usage_bits;
	texture.samples = format.samples;
	texture.allowed_shared_formats = format.shareable_formats;
	texture.has_initial_data = !data.is_empty();

	if ((format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		if (format_has_stencil(format.format)) {
			texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}
	} else {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
	}

	texture.bound = false;

	// Textures are only assumed to be immutable if they have initial data and none of the other bits that indicate write usage are enabled.
	bool texture_mutable_by_default = texture.usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_STORAGE_BIT | TEXTURE_USAGE_STORAGE_ATOMIC_BIT);
	if (data.is_empty() || texture_mutable_by_default) {
		_texture_make_mutable(&texture, RID());
	}

	texture_memory += driver->texture_get_allocation_size(texture.driver_id);

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	if (data.size()) {
		const bool use_general_in_copy_queues = driver->api_trait_get(RDD::API_TRAIT_USE_GENERAL_IN_COPY_QUEUES);
		const RDD::TextureLayout dst_layout = use_general_in_copy_queues ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COPY_DST_OPTIMAL;
		for (uint32_t i = 0; i < format.array_layers; i++) {
			_texture_initialize(id, i, data[i], dst_layout, immediate_flush);
		}

		if (texture.draw_tracker != nullptr) {
			texture.draw_tracker->usage = use_general_in_copy_queues ? RDG::RESOURCE_USAGE_GENERAL : RDG::RESOURCE_USAGE_COPY_TO;
		}
	}

	return id;
}
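
// Illustrative creation of a small sampled 2D texture (a sketch; "pixel_data"
// is a hypothetical byte vector holding one layer of R8G8B8A8 texels):
//
//   TextureFormat tf;
//   tf.texture_type = TEXTURE_TYPE_2D;
//   tf.format = DATA_FORMAT_R8G8B8A8_UNORM;
//   tf.width = 256;
//   tf.height = 256;
//   tf.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_UPDATE_BIT;
//   RID tex = rd->texture_create(tf, TextureView(), { pixel_data });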

RID RenderingDevice::texture_create_shared(const TextureView &p_view, RID p_with_texture) {
	Texture *src_texture = texture_owner.get_or_null(p_with_texture);
	ERR_FAIL_NULL_V(src_texture, RID());

	if (src_texture->owner.is_valid()) { // Ah, this is a share. The RenderingDeviceDriver needs the actual owner.
		p_with_texture = src_texture->owner;
		src_texture = texture_owner.get_or_null(src_texture->owner);
		ERR_FAIL_NULL_V(src_texture, RID()); // This is a bug.
	}

	// Create view.

	Texture texture = *src_texture;
	texture.slice_trackers = nullptr;
	texture.shared_fallback = nullptr;

	RDD::TextureView tv;
	bool create_shared = true;
	bool raw_reinterpretation = false;
	if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
		tv.format = texture.format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());

		ERR_FAIL_COND_V_MSG(!texture.allowed_shared_formats.has(p_view.format_override), RID(),
				"Format override is not in the list of allowed shareable formats for the original texture.");
		tv.format = p_view.format_override;
		create_shared = driver->texture_can_make_shared_with_format(texture.driver_id, p_view.format_override, raw_reinterpretation);
	}
	tv.swizzle_r = p_view.swizzle_r;
	tv.swizzle_g = p_view.swizzle_g;
	tv.swizzle_b = p_view.swizzle_b;
	tv.swizzle_a = p_view.swizzle_a;

	if (create_shared) {
		texture.driver_id = driver->texture_create_shared(texture.driver_id, tv);
	} else {
		// The regular view will use the same format as the main texture.
		RDD::TextureView regular_view = tv;
		regular_view.format = src_texture->format;
		texture.driver_id = driver->texture_create_shared(texture.driver_id, regular_view);

		// Create the independent texture for the alias.
		RDD::TextureFormat alias_format = texture.texture_format();
		alias_format.format = tv.format;
		alias_format.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_COPY_TO_BIT;

		_texture_check_shared_fallback(src_texture);
		_texture_check_shared_fallback(&texture);

		texture.shared_fallback->texture = driver->texture_create(alias_format, tv);
		texture.shared_fallback->raw_reinterpretation = raw_reinterpretation;
		texture_memory += driver->texture_get_allocation_size(texture.shared_fallback->texture);

		RDG::ResourceTracker *tracker = RDG::resource_tracker_create();
		tracker->texture_driver_id = texture.shared_fallback->texture;
		tracker->texture_size = Size2i(texture.width, texture.height);
		tracker->texture_subresources = texture.barrier_range();
		tracker->texture_usage = alias_format.usage_bits;
		tracker->is_discardable = texture.is_discardable;
		tracker->reference_count = 1;
		texture.shared_fallback->texture_tracker = tracker;
		texture.shared_fallback->revision = 0;

		if (raw_reinterpretation && src_texture->shared_fallback->buffer.id == 0) {
			// For shared textures of the same size, we create the buffer on the main texture if it doesn't have it already.
			_texture_create_reinterpret_buffer(src_texture);
		}
	}

	ERR_FAIL_COND_V(!texture.driver_id, RID());

	if (texture.draw_tracker != nullptr) {
		texture.draw_tracker->reference_count++;
	}

	texture.owner = p_with_texture;
	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	_add_dependency(id, p_with_texture);

	return id;
}

RID RenderingDevice::texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_usage, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers, uint64_t p_mipmaps) {
	// This method creates a texture object using a VkImage created by an extension, module or other external source (OpenXR uses this).

	Texture texture;
	texture.type = p_type;
	texture.format = p_format;
	texture.samples = p_samples;
	texture.width = p_width;
	texture.height = p_height;
	texture.depth = p_depth;
	texture.layers = p_layers;
	texture.mipmaps = p_mipmaps;
	texture.usage_flags = p_usage;
	texture.base_mipmap = 0;
	texture.base_layer = 0;
	texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_UNORM);
	texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_SRGB);

	if (p_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		/*if (format_has_stencil(p_format.format)) {
			texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}*/
	} else {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
	}

	texture.driver_id = driver->texture_create_from_extension(p_image, p_type, p_format, p_layers, (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT), p_mipmaps);
	ERR_FAIL_COND_V(!texture.driver_id, RID());

	_texture_make_mutable(&texture, RID());

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	return id;
}
1211
1212
RID RenderingDevice::texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type, uint32_t p_layers) {
1213
Texture *src_texture = texture_owner.get_or_null(p_with_texture);
1214
ERR_FAIL_NULL_V(src_texture, RID());
1215
1216
if (src_texture->owner.is_valid()) { // // Ahh this is a share. The RenderingDeviceDriver needs the actual owner.
1217
p_with_texture = src_texture->owner;
1218
src_texture = texture_owner.get_or_null(src_texture->owner);
1219
ERR_FAIL_NULL_V(src_texture, RID()); // This is a bug.
1220
}
1221
1222
ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_CUBEMAP && (src_texture->type != TEXTURE_TYPE_CUBE && src_texture->type != TEXTURE_TYPE_CUBE_ARRAY), RID(),
1223
"Can only create a cubemap slice from a cubemap or cubemap array mipmap");
1224
1225
ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_3D && src_texture->type != TEXTURE_TYPE_3D, RID(),
1226
"Can only create a 3D slice from a 3D texture");
1227
1228
ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_2D_ARRAY && (src_texture->type != TEXTURE_TYPE_2D_ARRAY), RID(),
1229
"Can only create an array slice from a 2D array mipmap");
1230
1231
// Create view.
1232
1233
ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID());
1234
ERR_FAIL_COND_V(p_mipmap + p_mipmaps > src_texture->mipmaps, RID());
1235
ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID());
1236
1237
int slice_layers = 1;
1238
if (p_layers != 0) {
1239
ERR_FAIL_COND_V_MSG(p_layers > 1 && p_slice_type != TEXTURE_SLICE_2D_ARRAY, RID(), "layer slicing only supported for 2D arrays");
1240
ERR_FAIL_COND_V_MSG(p_layer + p_layers > src_texture->layers, RID(), "layer slice is out of bounds");
1241
slice_layers = p_layers;
1242
} else if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
1243
ERR_FAIL_COND_V_MSG(p_layer != 0, RID(), "layer must be 0 when obtaining a 2D array mipmap slice");
1244
slice_layers = src_texture->layers;
1245
} else if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
1246
slice_layers = 6;
1247
}
1248
1249
Texture texture = *src_texture;
1250
texture.slice_trackers = nullptr;
1251
texture.shared_fallback = nullptr;
1252
1253
get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height);
1254
texture.mipmaps = p_mipmaps;
1255
texture.layers = slice_layers;
1256
texture.base_mipmap = p_mipmap;
1257
texture.base_layer = p_layer;
1258
1259
if (p_slice_type == TEXTURE_SLICE_2D) {
1260
texture.type = TEXTURE_TYPE_2D;
1261
} else if (p_slice_type == TEXTURE_SLICE_3D) {
1262
texture.type = TEXTURE_TYPE_3D;
1263
}
1264
1265
RDD::TextureView tv;
1266
bool create_shared = true;
1267
bool raw_reintepretation = false;
1268
if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
1269
tv.format = texture.format;
1270
} else {
1271
ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
1272
1273
ERR_FAIL_COND_V_MSG(!texture.allowed_shared_formats.has(p_view.format_override), RID(),
1274
"Format override is not in the list of allowed shareable formats for original texture.");
1275
tv.format = p_view.format_override;
1276
create_shared = driver->texture_can_make_shared_with_format(texture.driver_id, p_view.format_override, raw_reintepretation);
	}

	tv.swizzle_r = p_view.swizzle_r;
	tv.swizzle_g = p_view.swizzle_g;
	tv.swizzle_b = p_view.swizzle_b;
	tv.swizzle_a = p_view.swizzle_a;

	if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
		ERR_FAIL_COND_V_MSG(p_layer >= src_texture->layers, RID(),
				"Specified layer is invalid for cubemap");
		ERR_FAIL_COND_V_MSG((p_layer % 6) != 0, RID(),
				"Specified layer must be a multiple of 6.");
	}

	if (create_shared) {
		texture.driver_id = driver->texture_create_shared_from_slice(src_texture->driver_id, tv, p_slice_type, p_layer, slice_layers, p_mipmap, p_mipmaps);
	} else {
		// The regular view will use the same format as the main texture.
		RDD::TextureView regular_view = tv;
		regular_view.format = src_texture->format;
		texture.driver_id = driver->texture_create_shared_from_slice(src_texture->driver_id, regular_view, p_slice_type, p_layer, slice_layers, p_mipmap, p_mipmaps);

		// Create the independent texture for the slice.
		RDD::TextureSubresourceRange slice_range = texture.barrier_range();
		slice_range.base_mipmap = 0;
		slice_range.base_layer = 0;

		RDD::TextureFormat slice_format = texture.texture_format();
		slice_format.width = MAX(texture.width >> p_mipmap, 1U);
		slice_format.height = MAX(texture.height >> p_mipmap, 1U);
		slice_format.depth = MAX(texture.depth >> p_mipmap, 1U);
		slice_format.format = tv.format;
		slice_format.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_COPY_TO_BIT;

		_texture_check_shared_fallback(src_texture);
		_texture_check_shared_fallback(&texture);

		texture.shared_fallback->texture = driver->texture_create(slice_format, tv);
		texture.shared_fallback->raw_reinterpretation = raw_reinterpretation;
		texture_memory += driver->texture_get_allocation_size(texture.shared_fallback->texture);

		RDG::ResourceTracker *tracker = RDG::resource_tracker_create();
		tracker->texture_driver_id = texture.shared_fallback->texture;
		tracker->texture_size = Size2i(texture.width, texture.height);
		tracker->texture_subresources = slice_range;
		tracker->texture_usage = slice_format.usage_bits;
		tracker->is_discardable = slice_format.is_discardable;
		tracker->reference_count = 1;
		texture.shared_fallback->texture_tracker = tracker;
		texture.shared_fallback->revision = 0;

		if (raw_reinterpretation && src_texture->shared_fallback->buffer.id == 0) {
			// For shared texture slices, we create the buffer on the slice if the source texture has no reinterpretation buffer.
			_texture_create_reinterpret_buffer(&texture);
		}
	}

	ERR_FAIL_COND_V(!texture.driver_id, RID());

	const Rect2i slice_rect(p_mipmap, p_layer, p_mipmaps, slice_layers);
	texture.owner = p_with_texture;
	texture.slice_type = p_slice_type;
	texture.slice_rect = slice_rect;

	// If parent is mutable, make slice mutable by default.
	if (src_texture->draw_tracker != nullptr) {
		texture.draw_tracker = nullptr;
		_texture_make_mutable(&texture, RID());
	}

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	_add_dependency(id, p_with_texture);

	return id;
}

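// Copies a rectangular region out of a tightly packed source image into a
// destination whose rows are spaced p_dst_pitch bytes apart. The region starts
// at (p_src_x, p_src_y) inside a source image p_src_full_w units wide, where a
// unit (p_unit_size bytes) is one texel or one compressed block.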
static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_t *__restrict p_dst, uint32_t p_src_x, uint32_t p_src_y, uint32_t p_src_w, uint32_t p_src_h, uint32_t p_src_full_w, uint32_t p_dst_pitch, uint32_t p_unit_size) {
	uint32_t src_offset = (p_src_y * p_src_full_w + p_src_x) * p_unit_size;
	uint32_t dst_offset = 0;
	for (uint32_t y = p_src_h; y > 0; y--) {
		uint8_t const *__restrict src = p_src + src_offset;
		uint8_t *__restrict dst = p_dst + dst_offset;
		for (uint32_t x = p_src_w * p_unit_size; x > 0; x--) {
			*dst = *src;
			src++;
			dst++;
		}
		src_offset += p_src_full_w * p_unit_size;
		dst_offset += p_dst_pitch;
	}
}

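// Dispatches between the compressed and uncompressed copy paths: for block
// formats, pixel coordinates and dimensions are converted to block units
// before copying; otherwise the region is copied texel by texel.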
static _ALWAYS_INLINE_ void _copy_region_block_or_regular(const uint8_t *p_read_ptr, uint8_t *p_write_ptr, uint32_t p_x, uint32_t p_y, uint32_t p_width, uint32_t p_region_w, uint32_t p_region_h, uint32_t p_block_w, uint32_t p_block_h, uint32_t p_dst_pitch, uint32_t p_pixel_size, uint32_t p_block_size) {
	if (p_block_w != 1 || p_block_h != 1) {
		// Block format.
		uint32_t xb = p_x / p_block_w;
		uint32_t yb = p_y / p_block_h;
		uint32_t wb = p_width / p_block_w;
		uint32_t region_wb = p_region_w / p_block_w;
		uint32_t region_hb = p_region_h / p_block_h;
		_copy_region(p_read_ptr, p_write_ptr, xb, yb, region_wb, region_hb, wb, p_dst_pitch, p_block_size);
	} else {
		// Regular format.
		_copy_region(p_read_ptr, p_write_ptr, p_x, p_y, p_region_w, p_region_h, p_width, p_dst_pitch, p_pixel_size);
	}
}

uint32_t RenderingDevice::_texture_layer_count(Texture *p_texture) const {
	switch (p_texture->type) {
		case TEXTURE_TYPE_CUBE:
		case TEXTURE_TYPE_CUBE_ARRAY:
			return p_texture->layers * 6;
		default:
			return p_texture->layers;
	}
}

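// Integer helpers used for computing staging alignments. Note that the LCM
// divides by the GCD before multiplying, which avoids overflowing the
// intermediate product for large inputs.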
uint32_t greatest_common_denominator(uint32_t a, uint32_t b) {
	// Euclidean algorithm.
	uint32_t t;
	while (b != 0) {
		t = b;
		b = a % b;
		a = t;
	}

	return a;
}

uint32_t least_common_multiple(uint32_t a, uint32_t b) {
	if (a == 0 || b == 0) {
		return 0;
	}

	return (a / greatest_common_denominator(a, b)) * b;
}

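// The staging alignment for a texture is the least common multiple of its
// texel (or compressed block) byte size and the driver's reported transfer
// alignment, so offsets computed against it satisfy both constraints.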
uint32_t RenderingDevice::_texture_alignment(Texture *p_texture) const {
	uint32_t alignment = get_compressed_image_format_block_byte_size(p_texture->format);
	if (alignment == 1) {
		alignment = get_image_format_pixel_size(p_texture->format);
	}

	return least_common_multiple(alignment, driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT));
}

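// Uploads the initial contents of one layer of a texture through a transfer
// worker: the first pass measures how much staging memory the mip chain needs,
// the second maps the worker's staging buffer, copies every mip (and 3D slice)
// into it, and records the buffer-to-texture copies on the worker's command
// buffer.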
Error RenderingDevice::_texture_initialize(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, RDD::TextureLayout p_dst_layout, bool p_immediate_flush) {
	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, ERR_INVALID_PARAMETER);

	if (texture->owner != RID()) {
		p_texture = texture->owner;
		texture = texture_owner.get_or_null(texture->owner);
		ERR_FAIL_NULL_V(texture, ERR_BUG); // This is a bug.
	}

	uint32_t layer_count = _texture_layer_count(texture);
	ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);

	uint32_t width, height;
	uint32_t tight_mip_size = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, texture->mipmaps, &width, &height);
	uint32_t required_size = tight_mip_size;
	uint32_t required_align = _texture_alignment(texture);

	ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
			"Required size for texture update (" + itos(required_size) + ") does not match data supplied size (" + itos(p_data.size()) + ").");

	uint32_t block_w, block_h;
	get_compressed_image_format_block_dimensions(texture->format, block_w, block_h);

	uint32_t pixel_size = get_image_format_pixel_size(texture->format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(texture->format);
	uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format);

	// The algorithm operates in two passes: one to figure out the total size the staging buffer will require to allocate, and another where the copy is actually performed.
	uint32_t staging_worker_offset = 0;
	uint32_t staging_local_offset = 0;
	TransferWorker *transfer_worker = nullptr;
	const uint8_t *read_ptr = p_data.ptr();
	uint8_t *write_ptr = nullptr;
	for (uint32_t pass = 0; pass < 2; pass++) {
		const bool copy_pass = (pass == 1);
		if (copy_pass) {
			transfer_worker = _acquire_transfer_worker(staging_local_offset, required_align, staging_worker_offset);
			texture->transfer_worker_index = transfer_worker->index;

			{
				MutexLock lock(transfer_worker->operations_mutex);
				texture->transfer_worker_operation = ++transfer_worker->operations_counter;
			}

			staging_local_offset = 0;

			write_ptr = driver->buffer_map(transfer_worker->staging_buffer);
			ERR_FAIL_NULL_V(write_ptr, ERR_CANT_CREATE);

			if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
				// Transition the texture to the optimal layout.
				RDD::TextureBarrier tb;
				tb.texture = texture->driver_id;
				tb.dst_access = RDD::BARRIER_ACCESS_COPY_WRITE_BIT;
				tb.prev_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
				tb.next_layout = p_dst_layout;
				tb.subresources.aspect = texture->barrier_aspect_flags;
				tb.subresources.mipmap_count = texture->mipmaps;
				tb.subresources.base_layer = p_layer;
				tb.subresources.layer_count = 1;
				driver->command_pipeline_barrier(transfer_worker->command_buffer, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, RDD::PIPELINE_STAGE_COPY_BIT, {}, {}, tb);
			}
		}

		uint32_t mipmap_offset = 0;
		uint32_t logic_width = texture->width;
		uint32_t logic_height = texture->height;
		for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) {
			uint32_t depth = 0;
			uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth);

			const uint8_t *read_ptr_mipmap = read_ptr + mipmap_offset;
			tight_mip_size = image_total - mipmap_offset;

			for (uint32_t z = 0; z < depth; z++) {
				if (required_align > 0) {
					uint32_t align_offset = staging_local_offset % required_align;
					if (align_offset != 0) {
						staging_local_offset += required_align - align_offset;
					}
				}

				uint32_t pitch = (width * pixel_size * block_w) >> pixel_rshift;
				uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
				pitch = STEPIFY(pitch, pitch_step);
				uint32_t to_allocate = pitch * height;
				to_allocate >>= pixel_rshift;

				if (copy_pass) {
					const uint8_t *read_ptr_mipmap_layer = read_ptr_mipmap + (tight_mip_size / depth) * z;
					uint64_t staging_buffer_offset = staging_worker_offset + staging_local_offset;
					uint8_t *write_ptr_mipmap_layer = write_ptr + staging_buffer_offset;
					_copy_region_block_or_regular(read_ptr_mipmap_layer, write_ptr_mipmap_layer, 0, 0, width, width, height, block_w, block_h, pitch, pixel_size, block_size);

					RDD::BufferTextureCopyRegion copy_region;
					copy_region.buffer_offset = staging_buffer_offset;
					copy_region.texture_subresources.aspect = texture->read_aspect_flags;
					copy_region.texture_subresources.mipmap = mm_i;
					copy_region.texture_subresources.base_layer = p_layer;
					copy_region.texture_subresources.layer_count = 1;
					copy_region.texture_offset = Vector3i(0, 0, z);
					copy_region.texture_region_size = Vector3i(logic_width, logic_height, 1);
					driver->command_copy_buffer_to_texture(transfer_worker->command_buffer, transfer_worker->staging_buffer, texture->driver_id, p_dst_layout, copy_region);
				}

				staging_local_offset += to_allocate;
			}

			mipmap_offset = image_total;
			logic_width = MAX(1u, logic_width >> 1);
			logic_height = MAX(1u, logic_height >> 1);
		}

		if (copy_pass) {
			driver->buffer_unmap(transfer_worker->staging_buffer);

			// If the texture does not have a tracker, it means it must be transitioned to the sampling state.
			if (texture->draw_tracker == nullptr && driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
				RDD::TextureBarrier tb;
				tb.texture = texture->driver_id;
				tb.src_access = RDD::BARRIER_ACCESS_COPY_WRITE_BIT;
				tb.prev_layout = p_dst_layout;
				tb.next_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				tb.subresources.aspect = texture->barrier_aspect_flags;
				tb.subresources.mipmap_count = texture->mipmaps;
				tb.subresources.base_layer = p_layer;
				tb.subresources.layer_count = 1;
				transfer_worker->texture_barriers.push_back(tb);
			}

			if (p_immediate_flush) {
				_end_transfer_worker(transfer_worker);
				_submit_transfer_worker(transfer_worker);
				_wait_for_transfer_worker(transfer_worker);
			}

			_release_transfer_worker(transfer_worker);
		}
	}

	return OK;
}

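// Public upload path. Unlike _texture_initialize(), this records its copies on
// the frame's draw graph: the data is tiled into regions of at most
// texture_upload_region_size_px pixels, and each region is staged and copied
// individually so a large update can span multiple staging buffer blocks.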
Error RenderingDevice::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG(draw_list.active || compute_list.active, ERR_INVALID_PARAMETER, "Updating textures is forbidden during creation of a draw or compute list");

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, ERR_INVALID_PARAMETER);

	if (texture->owner != RID()) {
		p_texture = texture->owner;
		texture = texture_owner.get_or_null(texture->owner);
		ERR_FAIL_NULL_V(texture, ERR_BUG); // This is a bug.
	}

	ERR_FAIL_COND_V_MSG(texture->bound, ERR_CANT_ACQUIRE_RESOURCE,
			"Texture can't be updated while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to update this texture.");

	ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER, "Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_UPDATE_BIT` to be set to be updatable.");

	uint32_t layer_count = _texture_layer_count(texture);
	ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);

	uint32_t width, height;
	uint32_t tight_mip_size = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, texture->mipmaps, &width, &height);
	uint32_t required_size = tight_mip_size;
	uint32_t required_align = _texture_alignment(texture);

	ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
			"Required size for texture update (" + itos(required_size) + ") does not match data supplied size (" + itos(p_data.size()) + ").");

	_check_transfer_worker_texture(texture);

	uint32_t block_w, block_h;
	get_compressed_image_format_block_dimensions(texture->format, block_w, block_h);

	uint32_t pixel_size = get_image_format_pixel_size(texture->format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(texture->format);
	uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format);

	uint32_t region_size = texture_upload_region_size_px;

	const uint8_t *read_ptr = p_data.ptr();

	thread_local LocalVector<RDG::RecordedBufferToTextureCopy> command_buffer_to_texture_copies_vector;
	command_buffer_to_texture_copies_vector.clear();

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_texture, texture, true);

	uint32_t mipmap_offset = 0;

	uint32_t logic_width = texture->width;
	uint32_t logic_height = texture->height;

	for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) {
		uint32_t depth = 0;
		uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth);

		const uint8_t *read_ptr_mipmap = read_ptr + mipmap_offset;
		tight_mip_size = image_total - mipmap_offset;

		for (uint32_t z = 0; z < depth; z++) {
			const uint8_t *read_ptr_mipmap_layer = read_ptr_mipmap + (tight_mip_size / depth) * z;
			for (uint32_t y = 0; y < height; y += region_size) {
				for (uint32_t x = 0; x < width; x += region_size) {
					uint32_t region_w = MIN(region_size, width - x);
					uint32_t region_h = MIN(region_size, height - y);

					uint32_t region_logic_w = MIN(region_size, logic_width - x);
					uint32_t region_logic_h = MIN(region_size, logic_height - y);

					uint32_t region_pitch = (region_w * pixel_size * block_w) >> pixel_rshift;
					uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
					region_pitch = STEPIFY(region_pitch, pitch_step);
					uint32_t to_allocate = region_pitch * region_h;
					uint32_t alloc_offset = 0, alloc_size = 0;
					StagingRequiredAction required_action;
					Error err = _staging_buffer_allocate(upload_staging_buffers, to_allocate, required_align, alloc_offset, alloc_size, required_action, false);
					ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

					if (!command_buffer_to_texture_copies_vector.is_empty() && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL) {
						if (_texture_make_mutable(texture, p_texture)) {
							// The texture must be mutable to be used as a copy destination.
							draw_graph.add_synchronization();
						}

						// If the staging buffer requires flushing everything, we submit the command early and clear the current vector.
						draw_graph.add_texture_update(texture->driver_id, texture->draw_tracker, command_buffer_to_texture_copies_vector);
						command_buffer_to_texture_copies_vector.clear();
					}

					_staging_buffer_execute_required_action(upload_staging_buffers, required_action);

					uint8_t *write_ptr;

					{ // Map.
						uint8_t *data_ptr = driver->buffer_map(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);
						ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);
						write_ptr = data_ptr;
						write_ptr += alloc_offset;
					}

					ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
					ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);

					_copy_region_block_or_regular(read_ptr_mipmap_layer, write_ptr, x, y, width, region_w, region_h, block_w, block_h, region_pitch, pixel_size, block_size);

					{ // Unmap.
						driver->buffer_unmap(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);
					}

					RDD::BufferTextureCopyRegion copy_region;
					copy_region.buffer_offset = alloc_offset;
					copy_region.texture_subresources.aspect = texture->read_aspect_flags;
					copy_region.texture_subresources.mipmap = mm_i;
					copy_region.texture_subresources.base_layer = p_layer;
					copy_region.texture_subresources.layer_count = 1;
					copy_region.texture_offset = Vector3i(x, y, z);
					copy_region.texture_region_size = Vector3i(region_logic_w, region_logic_h, 1);

					RDG::RecordedBufferToTextureCopy buffer_to_texture_copy;
					buffer_to_texture_copy.from_buffer = upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id;
					buffer_to_texture_copy.region = copy_region;
					command_buffer_to_texture_copies_vector.push_back(buffer_to_texture_copy);

					upload_staging_buffers.blocks.write[upload_staging_buffers.current].fill_amount = alloc_offset + alloc_size;
				}
			}
		}

		mipmap_offset = image_total;
		logic_width = MAX(1u, logic_width >> 1);
		logic_height = MAX(1u, logic_height >> 1);
	}

	if (_texture_make_mutable(texture, p_texture)) {
		// The texture must be mutable to be used as a copy destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_update(texture->driver_id, texture->draw_tracker, command_buffer_to_texture_copies_vector);

	return OK;
}

void RenderingDevice::_texture_check_shared_fallback(Texture *p_texture) {
	if (p_texture->shared_fallback == nullptr) {
		p_texture->shared_fallback = memnew(Texture::SharedFallback);
	}
}

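// Shared texture fallback synchronization is revision based: writes bump the
// main texture's revision, and a slice only copies the main texture's contents
// (catching up to that revision) when it is about to be read and the revision
// numbers differ.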
void RenderingDevice::_texture_update_shared_fallback(RID p_texture_rid, Texture *p_texture, bool p_for_writing) {
	if (p_texture->shared_fallback == nullptr) {
		// This texture does not use any of the shared texture fallbacks.
		return;
	}

	if (p_texture->owner.is_valid()) {
		Texture *owner_texture = texture_owner.get_or_null(p_texture->owner);
		ERR_FAIL_NULL(owner_texture);
		if (p_for_writing) {
			// Only the main texture is used for writing when using the shared fallback.
			owner_texture->shared_fallback->revision++;
		} else if (p_texture->shared_fallback->revision != owner_texture->shared_fallback->revision) {
			// Copy the contents of the main texture into the shared texture fallback slice. Update the revision.
			_texture_copy_shared(p_texture->owner, owner_texture, p_texture_rid, p_texture);
			p_texture->shared_fallback->revision = owner_texture->shared_fallback->revision;
		}
	} else if (p_for_writing) {
		// Increment the revision of the texture so shared texture fallback slices must be updated.
		p_texture->shared_fallback->revision++;
	}
}

void RenderingDevice::_texture_free_shared_fallback(Texture *p_texture) {
	if (p_texture->shared_fallback != nullptr) {
		if (p_texture->shared_fallback->texture_tracker != nullptr) {
			RDG::resource_tracker_free(p_texture->shared_fallback->texture_tracker);
		}

		if (p_texture->shared_fallback->buffer_tracker != nullptr) {
			RDG::resource_tracker_free(p_texture->shared_fallback->buffer_tracker);
		}

		if (p_texture->shared_fallback->texture.id != 0) {
			texture_memory -= driver->texture_get_allocation_size(p_texture->shared_fallback->texture);
			driver->texture_free(p_texture->shared_fallback->texture);
		}

		if (p_texture->shared_fallback->buffer.id != 0) {
			buffer_memory -= driver->buffer_get_allocation_size(p_texture->shared_fallback->buffer);
			driver->buffer_free(p_texture->shared_fallback->buffer);
		}

		memdelete(p_texture->shared_fallback);
		p_texture->shared_fallback = nullptr;
	}
}

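// Brings a slice texture up to date with its owner. Depending on whether the
// fallback requires raw reinterpretation, this either round-trips the texel
// data through a shared buffer (texture to buffer to texture) or records a
// plain texture-to-texture copy for every mip of the slice.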
void RenderingDevice::_texture_copy_shared(RID p_src_texture_rid, Texture *p_src_texture, RID p_dst_texture_rid, Texture *p_dst_texture) {
	// The only type of copying allowed is from the main texture to the slice texture, as slice textures are not allowed to be used for writing when using this fallback.
	DEV_ASSERT(p_src_texture != nullptr);
	DEV_ASSERT(p_dst_texture != nullptr);
	DEV_ASSERT(p_src_texture->owner.is_null());
	DEV_ASSERT(p_dst_texture->owner == p_src_texture_rid);

	bool src_made_mutable = _texture_make_mutable(p_src_texture, p_src_texture_rid);
	bool dst_made_mutable = _texture_make_mutable(p_dst_texture, p_dst_texture_rid);
	if (src_made_mutable || dst_made_mutable) {
		draw_graph.add_synchronization();
	}

	if (p_dst_texture->shared_fallback->raw_reinterpretation) {
		// If one of the textures is a main texture and it has a reinterpret buffer, we prefer using that, as it's guaranteed to be big enough to hold
		// anything, and it's how the shared textures that don't use slices are created.
		bool src_has_buffer = p_src_texture->shared_fallback->buffer.id != 0;
		bool dst_has_buffer = p_dst_texture->shared_fallback->buffer.id != 0;
		bool from_src = p_src_texture->owner.is_null() && src_has_buffer;
		bool from_dst = p_dst_texture->owner.is_null() && dst_has_buffer;
		if (!from_src && !from_dst) {
			// If neither texture passed the condition, we just pick whichever texture has a reinterpretation buffer.
			from_src = src_has_buffer;
			from_dst = dst_has_buffer;
		}

		// Pick the buffer and tracker to use from the right texture.
		RDD::BufferID shared_buffer;
		RDG::ResourceTracker *shared_buffer_tracker = nullptr;
		if (from_src) {
			shared_buffer = p_src_texture->shared_fallback->buffer;
			shared_buffer_tracker = p_src_texture->shared_fallback->buffer_tracker;
		} else if (from_dst) {
			shared_buffer = p_dst_texture->shared_fallback->buffer;
			shared_buffer_tracker = p_dst_texture->shared_fallback->buffer_tracker;
		} else {
			DEV_ASSERT(false && "This path should not be reachable.");
		}

		// FIXME: When using reinterpretation buffers, the only texture aspect supported is color. Depth or stencil contents won't get copied.
		RDD::BufferTextureCopyRegion get_data_region;
		RDG::RecordedBufferToTextureCopy update_copy;
		RDD::TextureCopyableLayout first_copyable_layout;
		RDD::TextureCopyableLayout copyable_layout;
		RDD::TextureSubresource texture_subresource;
		texture_subresource.aspect = RDD::TEXTURE_ASPECT_COLOR;
		texture_subresource.layer = 0;
		texture_subresource.mipmap = 0;
		driver->texture_get_copyable_layout(p_dst_texture->shared_fallback->texture, texture_subresource, &first_copyable_layout);

		// Copying each mipmap from main texture to a buffer and then to the slice texture.
		thread_local LocalVector<RDD::BufferTextureCopyRegion> get_data_vector;
		thread_local LocalVector<RDG::RecordedBufferToTextureCopy> update_vector;
		get_data_vector.clear();
		update_vector.clear();
		for (uint32_t i = 0; i < p_dst_texture->mipmaps; i++) {
			driver->texture_get_copyable_layout(p_dst_texture->shared_fallback->texture, texture_subresource, &copyable_layout);

			uint32_t mipmap = p_dst_texture->base_mipmap + i;
			get_data_region.buffer_offset = copyable_layout.offset - first_copyable_layout.offset;
			get_data_region.texture_subresources.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
			get_data_region.texture_subresources.base_layer = p_dst_texture->base_layer;
			get_data_region.texture_subresources.mipmap = mipmap;
			get_data_region.texture_subresources.layer_count = p_dst_texture->layers;
			get_data_region.texture_region_size.x = MAX(1U, p_src_texture->width >> mipmap);
			get_data_region.texture_region_size.y = MAX(1U, p_src_texture->height >> mipmap);
			get_data_region.texture_region_size.z = MAX(1U, p_src_texture->depth >> mipmap);
			get_data_vector.push_back(get_data_region);

			update_copy.from_buffer = shared_buffer;
			update_copy.region.buffer_offset = get_data_region.buffer_offset;
			update_copy.region.texture_subresources.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
			update_copy.region.texture_subresources.base_layer = texture_subresource.layer;
			update_copy.region.texture_subresources.mipmap = texture_subresource.mipmap;
			update_copy.region.texture_subresources.layer_count = get_data_region.texture_subresources.layer_count;
			update_copy.region.texture_region_size.x = get_data_region.texture_region_size.x;
			update_copy.region.texture_region_size.y = get_data_region.texture_region_size.y;
			update_copy.region.texture_region_size.z = get_data_region.texture_region_size.z;
			update_vector.push_back(update_copy);

			texture_subresource.mipmap++;
		}

		draw_graph.add_texture_get_data(p_src_texture->driver_id, p_src_texture->draw_tracker, shared_buffer, get_data_vector, shared_buffer_tracker);
		draw_graph.add_texture_update(p_dst_texture->shared_fallback->texture, p_dst_texture->shared_fallback->texture_tracker, update_vector, shared_buffer_tracker);
	} else {
		// Raw reinterpretation is not required. Use a regular texture copy.
		RDD::TextureCopyRegion copy_region;
		copy_region.src_subresources.aspect = p_src_texture->read_aspect_flags;
		copy_region.src_subresources.base_layer = p_dst_texture->base_layer;
		copy_region.src_subresources.layer_count = p_dst_texture->layers;
		copy_region.dst_subresources.aspect = p_dst_texture->read_aspect_flags;
		copy_region.dst_subresources.base_layer = 0;
		copy_region.dst_subresources.layer_count = copy_region.src_subresources.layer_count;

		// Copying each mipmap from the main texture to the slice texture.
		thread_local LocalVector<RDD::TextureCopyRegion> region_vector;
		region_vector.clear();
		for (uint32_t i = 0; i < p_dst_texture->mipmaps; i++) {
			uint32_t mipmap = p_dst_texture->base_mipmap + i;
			copy_region.src_subresources.mipmap = mipmap;
			copy_region.dst_subresources.mipmap = i;
			copy_region.size.x = MAX(1U, p_src_texture->width >> mipmap);
			copy_region.size.y = MAX(1U, p_src_texture->height >> mipmap);
			copy_region.size.z = MAX(1U, p_src_texture->depth >> mipmap);
			region_vector.push_back(copy_region);
		}

		draw_graph.add_texture_copy(p_src_texture->driver_id, p_src_texture->draw_tracker, p_dst_texture->shared_fallback->texture, p_dst_texture->shared_fallback->texture_tracker, region_vector);
	}
}

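// Allocates the GPU buffer used to shuttle texel data between textures whose
// formats can only be shared through raw reinterpretation. Its size is derived
// from the base dimensions of the texture, with the row pitch and total size
// stepped to the driver's pitch and transfer alignment traits.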
void RenderingDevice::_texture_create_reinterpret_buffer(Texture *p_texture) {
	uint64_t row_pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
	uint64_t transfer_alignment = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT);
	uint32_t pixel_bytes = get_image_format_pixel_size(p_texture->format);
	uint32_t row_pitch = STEPIFY(p_texture->width * pixel_bytes, row_pitch_step);
	uint64_t buffer_size = STEPIFY(pixel_bytes * row_pitch * p_texture->height * p_texture->depth, transfer_alignment);
	p_texture->shared_fallback->buffer = driver->buffer_create(buffer_size, RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	buffer_memory += driver->buffer_get_allocation_size(p_texture->shared_fallback->buffer);

	RDG::ResourceTracker *tracker = RDG::resource_tracker_create();
	tracker->buffer_driver_id = p_texture->shared_fallback->buffer;
	p_texture->shared_fallback->buffer_tracker = tracker;
}

uint32_t RenderingDevice::_texture_vrs_method_to_usage_bits() const {
	switch (vrs_method) {
		case VRS_METHOD_FRAGMENT_SHADING_RATE:
			return RDD::TEXTURE_USAGE_VRS_FRAGMENT_SHADING_RATE_BIT;
		case VRS_METHOD_FRAGMENT_DENSITY_MAP:
			return RDD::TEXTURE_USAGE_VRS_FRAGMENT_DENSITY_MAP_BIT;
		default:
			return 0;
	}
}

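// Direct readback path for textures created with TEXTURE_USAGE_CPU_READ_BIT:
// each mip is mapped and copied out row by row, discarding the driver's row
// and depth pitch padding so the returned vector is tightly packed.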
Vector<uint8_t> RenderingDevice::_texture_get_data(Texture *tex, uint32_t p_layer, bool p_2d) {
	uint32_t width, height, depth;
	uint32_t tight_mip_size = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, tex->mipmaps, &width, &height, &depth);

	Vector<uint8_t> image_data;
	image_data.resize(tight_mip_size);

	uint32_t blockw, blockh;
	get_compressed_image_format_block_dimensions(tex->format, blockw, blockh);
	uint32_t block_size = get_compressed_image_format_block_byte_size(tex->format);
	uint32_t pixel_size = get_image_format_pixel_size(tex->format);

	{
		uint8_t *w = image_data.ptrw();

		uint32_t mipmap_offset = 0;
		for (uint32_t mm_i = 0; mm_i < tex->mipmaps; mm_i++) {
			uint32_t image_total = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, mm_i + 1, &width, &height, &depth);

			uint8_t *write_ptr_mipmap = w + mipmap_offset;
			tight_mip_size = image_total - mipmap_offset;

			RDD::TextureSubresource subres;
			subres.aspect = RDD::TEXTURE_ASPECT_COLOR;
			subres.layer = p_layer;
			subres.mipmap = mm_i;
			RDD::TextureCopyableLayout layout;
			driver->texture_get_copyable_layout(tex->driver_id, subres, &layout);

			uint8_t *img_mem = driver->texture_map(tex->driver_id, subres);
			ERR_FAIL_NULL_V(img_mem, Vector<uint8_t>());

			for (uint32_t z = 0; z < depth; z++) {
				uint8_t *write_ptr = write_ptr_mipmap + z * tight_mip_size / depth;
				const uint8_t *slice_read_ptr = img_mem + z * layout.depth_pitch;

				if (block_size > 1) {
					// Compressed.
					uint32_t line_width = (block_size * (width / blockw));
					for (uint32_t y = 0; y < height / blockh; y++) {
						const uint8_t *rptr = slice_read_ptr + y * layout.row_pitch;
						uint8_t *wptr = write_ptr + y * line_width;

						memcpy(wptr, rptr, line_width);
					}

				} else {
					// Uncompressed.
					for (uint32_t y = 0; y < height; y++) {
						const uint8_t *rptr = slice_read_ptr + y * layout.row_pitch;
						uint8_t *wptr = write_ptr + y * pixel_size * width;
						memcpy(wptr, rptr, (uint64_t)pixel_size * width);
					}
				}
			}

			driver->texture_unmap(tex->driver_id);

			mipmap_offset = image_total;
		}
	}

	return image_data;
}

Vector<uint8_t> RenderingDevice::texture_get_data(RID p_texture, uint32_t p_layer) {
	ERR_RENDER_THREAD_GUARD_V(Vector<uint8_t>());

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, Vector<uint8_t>());

	ERR_FAIL_COND_V_MSG(tex->bound, Vector<uint8_t>(),
			"Texture can't be retrieved while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to retrieve this texture.");
	ERR_FAIL_COND_V_MSG(!(tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), Vector<uint8_t>(),
			"Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be retrieved.");

	ERR_FAIL_COND_V(p_layer >= tex->layers, Vector<uint8_t>());

	_check_transfer_worker_texture(tex);

	if (tex->usage_flags & TEXTURE_USAGE_CPU_READ_BIT) {
		// Does not need anything fancy, map and read.
		return _texture_get_data(tex, p_layer);
	} else {
		LocalVector<RDD::TextureCopyableLayout> mip_layouts;
		uint32_t work_mip_alignment = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT);
		uint32_t work_buffer_size = 0;
		mip_layouts.resize(tex->mipmaps);
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			RDD::TextureSubresource subres;
			subres.aspect = RDD::TEXTURE_ASPECT_COLOR;
			subres.layer = p_layer;
			subres.mipmap = i;
			driver->texture_get_copyable_layout(tex->driver_id, subres, &mip_layouts[i]);

			// Assuming layers are tightly packed. If this is not true on some driver, we must modify the copy algorithm.
			DEV_ASSERT(mip_layouts[i].layer_pitch == mip_layouts[i].size / tex->layers);

			work_buffer_size = STEPIFY(work_buffer_size, work_mip_alignment) + mip_layouts[i].size;
		}

		RDD::BufferID tmp_buffer = driver->buffer_create(work_buffer_size, RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
		ERR_FAIL_COND_V(!tmp_buffer, Vector<uint8_t>());

		thread_local LocalVector<RDD::BufferTextureCopyRegion> command_buffer_texture_copy_regions_vector;
		command_buffer_texture_copy_regions_vector.clear();

		uint32_t w = tex->width;
		uint32_t h = tex->height;
		uint32_t d = tex->depth;
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			RDD::BufferTextureCopyRegion copy_region;
			copy_region.buffer_offset = mip_layouts[i].offset;
			copy_region.texture_subresources.aspect = tex->read_aspect_flags;
			copy_region.texture_subresources.mipmap = i;
			copy_region.texture_subresources.base_layer = p_layer;
			copy_region.texture_subresources.layer_count = 1;
			copy_region.texture_region_size.x = w;
			copy_region.texture_region_size.y = h;
			copy_region.texture_region_size.z = d;
			command_buffer_texture_copy_regions_vector.push_back(copy_region);

			w = MAX(1u, w >> 1);
			h = MAX(1u, h >> 1);
			d = MAX(1u, d >> 1);
		}

		if (_texture_make_mutable(tex, p_texture)) {
			// The texture must be mutable to be used as a copy source due to layout transitions.
			draw_graph.add_synchronization();
		}

		draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, tmp_buffer, command_buffer_texture_copy_regions_vector);

		// Flush everything so memory can be safely mapped.
		_flush_and_stall_for_all_frames();

		const uint8_t *read_ptr = driver->buffer_map(tmp_buffer);
		ERR_FAIL_NULL_V(read_ptr, Vector<uint8_t>());

		uint32_t block_w = 0;
		uint32_t block_h = 0;
		get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);

		Vector<uint8_t> buffer_data;
		uint32_t tight_buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps);
		buffer_data.resize(tight_buffer_size);

		uint8_t *write_ptr = buffer_data.ptrw();

		w = tex->width;
		h = tex->height;
		d = tex->depth;
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			uint32_t width = 0, height = 0, depth = 0;
			uint32_t tight_mip_size = get_image_format_required_size(tex->format, w, h, d, 1, &width, &height, &depth);
			uint32_t tight_row_pitch = tight_mip_size / ((height / block_h) * depth);

			// Copy row-by-row to erase padding due to alignments.
			const uint8_t *rp = read_ptr;
			uint8_t *wp = write_ptr;
			for (uint32_t row = h * d / block_h; row != 0; row--) {
				memcpy(wp, rp, tight_row_pitch);
				rp += mip_layouts[i].row_pitch;
				wp += tight_row_pitch;
			}

			w = MAX(block_w, w >> 1);
			h = MAX(block_h, h >> 1);
			d = MAX(1u, d >> 1);
			read_ptr += mip_layouts[i].size;
			write_ptr += tight_mip_size;
		}

		driver->buffer_unmap(tmp_buffer);
		driver->buffer_free(tmp_buffer);

		return buffer_data;
	}
}

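// Asynchronous variant of texture_get_data(). Instead of stalling, the copies
// are staged into this frame's download buffers and recorded as a
// TextureGetDataRequest, so p_callback can be invoked with the tightly packed
// data once the frame's transfers complete.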
Error RenderingDevice::texture_get_data_async(RID p_texture, uint32_t p_layer, const Callable &p_callback) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(tex->bound, ERR_INVALID_PARAMETER, "Texture can't be retrieved while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to retrieve this texture.");
	ERR_FAIL_COND_V_MSG(!(tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER, "Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be retrieved.");
	ERR_FAIL_COND_V(p_layer >= tex->layers, ERR_INVALID_PARAMETER);

	_check_transfer_worker_texture(tex);

	thread_local LocalVector<RDD::TextureCopyableLayout> mip_layouts;
	mip_layouts.resize(tex->mipmaps);
	for (uint32_t i = 0; i < tex->mipmaps; i++) {
		RDD::TextureSubresource subres;
		subres.aspect = RDD::TEXTURE_ASPECT_COLOR;
		subres.layer = p_layer;
		subres.mipmap = i;
		driver->texture_get_copyable_layout(tex->driver_id, subres, &mip_layouts[i]);

		// Assuming layers are tightly packed. If this is not true on some driver, we must modify the copy algorithm.
		DEV_ASSERT(mip_layouts[i].layer_pitch == mip_layouts[i].size / tex->layers);
	}

	ERR_FAIL_COND_V(mip_layouts.is_empty(), ERR_INVALID_PARAMETER);

	if (_texture_make_mutable(tex, p_texture)) {
		// The texture must be mutable to be used as a copy source due to layout transitions.
		draw_graph.add_synchronization();
	}

	TextureGetDataRequest get_data_request;
	get_data_request.callback = p_callback;
	get_data_request.frame_local_index = frames[frame].download_buffer_texture_copy_regions.size();
	get_data_request.width = tex->width;
	get_data_request.height = tex->height;
	get_data_request.depth = tex->depth;
	get_data_request.format = tex->format;
	get_data_request.mipmaps = tex->mipmaps;

	uint32_t block_w, block_h;
	get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);

	uint32_t pixel_size = get_image_format_pixel_size(tex->format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(tex->format);

	uint32_t w, h, d;
	uint32_t required_align = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT);
	uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
	uint32_t region_size = texture_download_region_size_px;
	uint32_t logic_w = tex->width;
	uint32_t logic_h = tex->height;
	uint32_t mipmap_offset = 0;
	uint32_t block_write_offset;
	uint32_t block_write_amount;
	StagingRequiredAction required_action;
	for (uint32_t i = 0; i < tex->mipmaps; i++) {
		uint32_t image_total = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, i + 1, &w, &h, &d);
		uint32_t tight_mip_size = image_total - mipmap_offset;
		for (uint32_t z = 0; z < d; z++) {
			for (uint32_t y = 0; y < h; y += region_size) {
				for (uint32_t x = 0; x < w; x += region_size) {
					uint32_t region_w = MIN(region_size, w - x);
					uint32_t region_h = MIN(region_size, h - y);
					ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
					ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);

					uint32_t region_logic_w = MIN(region_size, logic_w - x);
					uint32_t region_logic_h = MIN(region_size, logic_h - y);
					uint32_t region_pitch = (region_w * pixel_size * block_w) >> pixel_rshift;
					region_pitch = STEPIFY(region_pitch, pitch_step);

					uint32_t to_allocate = region_pitch * region_h;
					Error err = _staging_buffer_allocate(download_staging_buffers, to_allocate, required_align, block_write_offset, block_write_amount, required_action, false);
					ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

					const bool flush_frames = (get_data_request.frame_local_count > 0) && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL;
					if (flush_frames) {
						for (uint32_t j = 0; j < get_data_request.frame_local_count; j++) {
							uint32_t local_index = get_data_request.frame_local_index + j;
							draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, frames[frame].download_texture_staging_buffers[local_index], frames[frame].download_buffer_texture_copy_regions[local_index]);
						}
					}

					_staging_buffer_execute_required_action(download_staging_buffers, required_action);

					if (flush_frames) {
						get_data_request.frame_local_count = 0;
						get_data_request.frame_local_index = frames[frame].download_buffer_texture_copy_regions.size();
					}

					RDD::BufferTextureCopyRegion copy_region;
					copy_region.buffer_offset = block_write_offset;
					copy_region.texture_subresources.aspect = tex->read_aspect_flags;
					copy_region.texture_subresources.mipmap = i;
					copy_region.texture_subresources.base_layer = p_layer;
					copy_region.texture_subresources.layer_count = 1;
					copy_region.texture_offset = Vector3i(x, y, z);
					copy_region.texture_region_size = Vector3i(region_logic_w, region_logic_h, 1);
					frames[frame].download_texture_staging_buffers.push_back(download_staging_buffers.blocks[download_staging_buffers.current].driver_id);
					frames[frame].download_buffer_texture_copy_regions.push_back(copy_region);
					frames[frame].download_texture_mipmap_offsets.push_back(mipmap_offset + (tight_mip_size / d) * z);
					get_data_request.frame_local_count++;

					download_staging_buffers.blocks.write[download_staging_buffers.current].fill_amount = block_write_offset + block_write_amount;
				}
			}
		}

		mipmap_offset = image_total;
		logic_w = MAX(1u, logic_w >> 1);
		logic_h = MAX(1u, logic_h >> 1);
	}

	if (get_data_request.frame_local_count > 0) {
		for (uint32_t i = 0; i < get_data_request.frame_local_count; i++) {
			uint32_t local_index = get_data_request.frame_local_index + i;
			draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, frames[frame].download_texture_staging_buffers[local_index], frames[frame].download_buffer_texture_copy_regions[local_index]);
		}

		frames[frame].download_texture_get_data_requests.push_back(get_data_request);
	}

	return OK;
}

bool RenderingDevice::texture_is_shared(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(false);

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, false);
	return tex->owner.is_valid();
}

bool RenderingDevice::texture_is_valid(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(false);

	return texture_owner.owns(p_texture);
}

RD::TextureFormat RenderingDevice::texture_get_format(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(TextureFormat());

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, TextureFormat());

	TextureFormat tf;

	tf.format = tex->format;
	tf.width = tex->width;
	tf.height = tex->height;
	tf.depth = tex->depth;
	tf.array_layers = tex->layers;
	tf.mipmaps = tex->mipmaps;
	tf.texture_type = tex->type;
	tf.samples = tex->samples;
	tf.usage_bits = tex->usage_flags;
	tf.shareable_formats = tex->allowed_shared_formats;
	tf.is_resolve_buffer = tex->is_resolve_buffer;
	tf.is_discardable = tex->is_discardable;

	return tf;
}

Size2i RenderingDevice::texture_size(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(Size2i());

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, Size2i());
	return Size2i(tex->width, tex->height);
}

#ifndef DISABLE_DEPRECATED
uint64_t RenderingDevice::texture_get_native_handle(RID p_texture) {
	return get_driver_resource(DRIVER_RESOURCE_TEXTURE, p_texture);
}
#endif

Error RenderingDevice::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *src_tex = texture_owner.get_or_null(p_from_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be copied while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to copy this texture.");
	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be retrieved.");

	uint32_t src_width, src_height, src_depth;
	get_image_format_required_size(src_tex->format, src_tex->width, src_tex->height, src_tex->depth, p_src_mipmap + 1, &src_width, &src_height, &src_depth);

	ERR_FAIL_COND_V(p_from.x < 0 || p_from.x + p_size.x > src_width, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_from.y < 0 || p_from.y + p_size.y > src_height, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_from.z < 0 || p_from.z + p_size.z > src_depth, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_src_mipmap >= src_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_src_layer >= src_tex->layers, ERR_INVALID_PARAMETER);

	Texture *dst_tex = texture_owner.get_or_null(p_to_texture);
	ERR_FAIL_NULL_V(dst_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
			"Destination texture can't be copied while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to copy this texture.");
	ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Destination texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_TO_BIT` to be set to be retrieved.");

	uint32_t dst_width, dst_height, dst_depth;
	get_image_format_required_size(dst_tex->format, dst_tex->width, dst_tex->height, dst_tex->depth, p_dst_mipmap + 1, &dst_width, &dst_height, &dst_depth);

	ERR_FAIL_COND_V(p_to.x < 0 || p_to.x + p_size.x > dst_width, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_to.y < 0 || p_to.y + p_size.y > dst_height, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_to.z < 0 || p_to.z + p_size.z > dst_depth, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_dst_mipmap >= dst_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_dst_layer >= dst_tex->layers, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->read_aspect_flags != dst_tex->read_aspect_flags, ERR_INVALID_PARAMETER,
			"Source and destination texture must be of the same type (color or depth).");

	_check_transfer_worker_texture(src_tex);
	_check_transfer_worker_texture(dst_tex);

	RDD::TextureCopyRegion copy_region;
	copy_region.src_subresources.aspect = src_tex->read_aspect_flags;
	copy_region.src_subresources.mipmap = p_src_mipmap;
	copy_region.src_subresources.base_layer = p_src_layer;
	copy_region.src_subresources.layer_count = 1;
	copy_region.src_offset = p_from;

	copy_region.dst_subresources.aspect = dst_tex->read_aspect_flags;
	copy_region.dst_subresources.mipmap = p_dst_mipmap;
	copy_region.dst_subresources.base_layer = p_dst_layer;
	copy_region.dst_subresources.layer_count = 1;
	copy_region.dst_offset = p_to;

	copy_region.size = p_size;

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_to_texture, dst_tex, true);

	// The textures must be mutable to be used in the copy operation.
	bool src_made_mutable = _texture_make_mutable(src_tex, p_from_texture);
	bool dst_made_mutable = _texture_make_mutable(dst_tex, p_to_texture);
	if (src_made_mutable || dst_made_mutable) {
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_copy(src_tex->driver_id, src_tex->draw_tracker, dst_tex->driver_id, dst_tex->draw_tracker, copy_region);

	return OK;
}

Error RenderingDevice::texture_resolve_multisample(RID p_from_texture, RID p_to_texture) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *src_tex = texture_owner.get_or_null(p_from_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be copied while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to copy this texture.");
	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be retrieved.");

	ERR_FAIL_COND_V_MSG(src_tex->type != TEXTURE_TYPE_2D, ERR_INVALID_PARAMETER, "Source texture must be 2D (or a slice of a 3D/Cube texture)");
	ERR_FAIL_COND_V_MSG(src_tex->samples == TEXTURE_SAMPLES_1, ERR_INVALID_PARAMETER, "Source texture must be multisampled.");

	Texture *dst_tex = texture_owner.get_or_null(p_to_texture);
	ERR_FAIL_NULL_V(dst_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
			"Destination texture can't be copied while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to copy this texture.");
	ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Destination texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_TO_BIT` to be set to be retrieved.");

	ERR_FAIL_COND_V_MSG(dst_tex->type != TEXTURE_TYPE_2D, ERR_INVALID_PARAMETER, "Destination texture must be 2D (or a slice of a 3D/Cube texture).");
	ERR_FAIL_COND_V_MSG(dst_tex->samples != TEXTURE_SAMPLES_1, ERR_INVALID_PARAMETER, "Destination texture must not be multisampled.");

	ERR_FAIL_COND_V_MSG(src_tex->format != dst_tex->format, ERR_INVALID_PARAMETER, "Source and destination textures must be the same format.");
	ERR_FAIL_COND_V_MSG(src_tex->width != dst_tex->width || src_tex->height != dst_tex->height || src_tex->depth != dst_tex->depth, ERR_INVALID_PARAMETER, "Source and destination textures must have the same dimensions.");

	ERR_FAIL_COND_V_MSG(src_tex->read_aspect_flags != dst_tex->read_aspect_flags, ERR_INVALID_PARAMETER,
			"Source and destination texture must be of the same type (color or depth).");

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_to_texture, dst_tex, true);

	_check_transfer_worker_texture(src_tex);
	_check_transfer_worker_texture(dst_tex);

	// The textures must be mutable to be used in the resolve operation.
	bool src_made_mutable = _texture_make_mutable(src_tex, p_from_texture);
	bool dst_made_mutable = _texture_make_mutable(dst_tex, p_to_texture);
	if (src_made_mutable || dst_made_mutable) {
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_resolve(src_tex->driver_id, src_tex->draw_tracker, dst_tex->driver_id, dst_tex->draw_tracker, src_tex->base_layer, src_tex->base_mipmap, dst_tex->base_layer, dst_tex->base_mipmap);

	return OK;
}

void RenderingDevice::texture_set_discardable(RID p_texture, bool p_discardable) {
	ERR_RENDER_THREAD_GUARD();

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL(texture);

	texture->is_discardable = p_discardable;

	if (texture->draw_tracker != nullptr) {
		texture->draw_tracker->is_discardable = p_discardable;
	}

	if (texture->shared_fallback != nullptr && texture->shared_fallback->texture_tracker != nullptr) {
		texture->shared_fallback->texture_tracker->is_discardable = p_discardable;
	}
}

bool RenderingDevice::texture_is_discardable(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(false);

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, false);

	return texture->is_discardable;
}

Error RenderingDevice::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *src_tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be cleared while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to clear this texture.");

	ERR_FAIL_COND_V(p_layers == 0, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_mipmaps == 0, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_TO_BIT` to be set to be cleared.");

	ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_base_layer + p_layers > src_tex->layers, ERR_INVALID_PARAMETER);

	_check_transfer_worker_texture(src_tex);

	RDD::TextureSubresourceRange range;
	range.aspect = src_tex->read_aspect_flags;
	range.base_mipmap = src_tex->base_mipmap + p_base_mipmap;
	range.mipmap_count = p_mipmaps;
	range.base_layer = src_tex->base_layer + p_base_layer;
	range.layer_count = p_layers;

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_texture, src_tex, true);

	if (_texture_make_mutable(src_tex, p_texture)) {
		// The texture must be mutable to be used as a clear destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_clear(src_tex->driver_id, src_tex->draw_tracker, p_color, range);

	return OK;
}

bool RenderingDevice::texture_is_format_supported_for_usage(DataFormat p_format, BitField<RenderingDevice::TextureUsageBits> p_usage) const {
	ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);

	bool cpu_readable = (p_usage & RDD::TEXTURE_USAGE_CPU_READ_BIT);
	BitField<TextureUsageBits> supported = driver->texture_get_usages_supported_by_format(p_format, cpu_readable);
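	// ORing the requested usages into the supported set is a no-op when every
	// requested bit is already supported; if the result differs, at least one
	// requested usage is unsupported.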
	bool any_unsupported = (((int64_t)supported) | ((int64_t)p_usage)) != ((int64_t)supported);
	return !any_unsupported;
}

/*********************/
/**** FRAMEBUFFER ****/
/*********************/

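// Builds a driver render pass from the high-level attachment and pass
// descriptions: validates multiview limits, then derives each attachment's
// load/store ops and layouts, treating the VRS attachment specially since its
// layout is dictated by the VRS method in use.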
RDD::RenderPassID RenderingDevice::_render_pass_create(RenderingDeviceDriver *p_driver, const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, uint32_t p_view_count, VRSMethod p_vrs_method, int32_t p_vrs_attachment, Size2i p_vrs_texel_size, Vector<TextureSamples> *r_samples) {
	// NOTE:
	// Before the refactor to RenderingDevice-RenderingDeviceDriver, there was commented-out code to
	// specify dependencies to external subpasses. Since it had been unused for a long time, it wasn't ported
	// to the new architecture.

	LocalVector<int32_t> attachment_last_pass;
	attachment_last_pass.resize(p_attachments.size());

	if (p_view_count > 1) {
		const RDD::MultiviewCapabilities &capabilities = p_driver->get_multiview_capabilities();

		// This only works with multiview!
		ERR_FAIL_COND_V_MSG(!capabilities.is_supported, RDD::RenderPassID(), "Multiview not supported");

		// Make sure we limit this to the number of views we support.
		ERR_FAIL_COND_V_MSG(p_view_count > capabilities.max_view_count, RDD::RenderPassID(), "Hardware does not support requested number of views for Multiview render pass");
	}

	LocalVector<RDD::Attachment> attachments;
	LocalVector<uint32_t> attachment_remap;

	for (int i = 0; i < p_attachments.size(); i++) {
		if (p_attachments[i].usage_flags == AttachmentFormat::UNUSED_ATTACHMENT) {
			attachment_remap.push_back(RDD::AttachmentReference::UNUSED);
			continue;
		}

		ERR_FAIL_INDEX_V(p_attachments[i].format, DATA_FORMAT_MAX, RDD::RenderPassID());
		ERR_FAIL_INDEX_V(p_attachments[i].samples, TEXTURE_SAMPLES_MAX, RDD::RenderPassID());
		ERR_FAIL_COND_V_MSG(!(p_attachments[i].usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT | TEXTURE_USAGE_VRS_ATTACHMENT_BIT)),
				RDD::RenderPassID(), "Texture format for index (" + itos(i) + ") requires an attachment (color, depth-stencil, input or VRS) bit set.");

		RDD::Attachment description;
		description.format = p_attachments[i].format;
		description.samples = p_attachments[i].samples;

// We can setup a framebuffer where we write to our VRS texture to set it up.
2505
// We make the assumption here that if our texture is actually used as our VRS attachment.
2506
// It is used as such for each subpass. This is fairly certain seeing the restrictions on subpasses.
2507
bool is_vrs = (p_attachments[i].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && i == p_vrs_attachment;
2508
if (is_vrs) {
2509
description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
2510
description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
2511
description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
2512
description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
2513
description.initial_layout = _vrs_layout_from_method(p_vrs_method);
2514
description.final_layout = _vrs_layout_from_method(p_vrs_method);
2515
} else {
2516
if (p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
2517
description.load_op = p_load_ops[i];
2518
description.store_op = p_store_ops[i];
2519
description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
2520
description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
2521
description.initial_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
2522
description.final_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
2523
} else if (p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
2524
description.load_op = p_load_ops[i];
2525
description.store_op = p_store_ops[i];
2526
description.stencil_load_op = p_load_ops[i];
2527
description.stencil_store_op = p_store_ops[i];
2528
description.initial_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
2529
description.final_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
2530
} else {
2531
description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
2532
description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
2533
description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
2534
description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
2535
description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
2536
description.final_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
2537
}
2538
}
2539
2540
attachment_last_pass[i] = -1;
2541
attachment_remap.push_back(attachments.size());
2542
attachments.push_back(description);
2543
}
2544
2545
LocalVector<RDD::Subpass> subpasses;
2546
subpasses.resize(p_passes.size());
2547
LocalVector<RDD::SubpassDependency> subpass_dependencies;
2548
2549
for (int i = 0; i < p_passes.size(); i++) {
2550
const FramebufferPass *pass = &p_passes[i];
2551
RDD::Subpass &subpass = subpasses[i];
2552
2553
TextureSamples texture_samples = TEXTURE_SAMPLES_1;
2554
bool is_multisample_first = true;
2555
2556
for (int j = 0; j < pass->color_attachments.size(); j++) {
2557
int32_t attachment = pass->color_attachments[j];
2558
RDD::AttachmentReference reference;
2559
if (attachment == ATTACHMENT_UNUSED) {
2560
reference.attachment = RDD::AttachmentReference::UNUSED;
2561
reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
2562
} else {
2563
ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), color attachment (" + itos(j) + ").");
2564
ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it's marked as depth, but it's not usable as color attachment.");
2565
ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
2566
2567
if (is_multisample_first) {
2568
texture_samples = p_attachments[attachment].samples;
2569
is_multisample_first = false;
2570
} else {
2571
ERR_FAIL_COND_V_MSG(texture_samples != p_attachments[attachment].samples, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), if an attachment is marked as multisample, all of them should be multisample and use the same number of samples.");
2572
}
2573
reference.attachment = attachment_remap[attachment];
2574
reference.layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
2575
attachment_last_pass[attachment] = i;
2576
}
2577
reference.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
2578
subpass.color_references.push_back(reference);
2579
}
2580
2581
for (int j = 0; j < pass->input_attachments.size(); j++) {
2582
int32_t attachment = pass->input_attachments[j];
2583
RDD::AttachmentReference reference;
2584
if (attachment == ATTACHMENT_UNUSED) {
2585
reference.attachment = RDD::AttachmentReference::UNUSED;
2586
reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
2587
} else {
2588
ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), input attachment (" + itos(j) + ").");
2589
ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_INPUT_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it isn't marked as an input texture.");
2590
ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
2591
reference.attachment = attachment_remap[attachment];
2592
reference.layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
2593
attachment_last_pass[attachment] = i;
2594
}
2595
reference.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
2596
subpass.input_references.push_back(reference);
2597
}
2598
2599
if (pass->resolve_attachments.size() > 0) {
2600
ERR_FAIL_COND_V_MSG(pass->resolve_attachments.size() != pass->color_attachments.size(), RDD::RenderPassID(), "The amount of resolve attachments (" + itos(pass->resolve_attachments.size()) + ") must match the number of color attachments (" + itos(pass->color_attachments.size()) + ").");
2601
ERR_FAIL_COND_V_MSG(texture_samples == TEXTURE_SAMPLES_1, RDD::RenderPassID(), "Resolve attachments specified, but color attachments are not multisample.");
2602
}
2603
		for (int j = 0; j < pass->resolve_attachments.size(); j++) {
			int32_t attachment = pass->resolve_attachments[j];

			RDD::AttachmentReference reference;
			if (attachment == ATTACHMENT_UNUSED) {
				reference.attachment = RDD::AttachmentReference::UNUSED;
				reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
			} else {
				ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(pass->color_attachments[j] == ATTACHMENT_UNUSED, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment (" + itos(j) + "), the respective color attachment is marked as unused.");
				ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment, it isn't marked as a color texture.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
				bool multisample = p_attachments[attachment].samples > TEXTURE_SAMPLES_1;
				ERR_FAIL_COND_V_MSG(multisample, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachments can't be multisample.");
				// Resolve targets are fully overwritten, so their previous contents never need to be loaded.
				// Only validated (non-unused) entries reach this point, so the remapped index is always in range.
				attachments[attachment_remap[attachment]].load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
				reference.attachment = attachment_remap[attachment];
				reference.layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
				attachment_last_pass[attachment] = i;
			}
			reference.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
			subpass.resolve_references.push_back(reference);
		}

		if (pass->depth_attachment != ATTACHMENT_UNUSED) {
			int32_t attachment = pass->depth_attachment;
			ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), depth attachment.");
			ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it's marked as depth, but it's not a depth attachment.");
			ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
			subpass.depth_stencil_reference.attachment = attachment_remap[attachment];
			subpass.depth_stencil_reference.layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
			attachment_last_pass[attachment] = i;

			if (is_multisample_first) {
				texture_samples = p_attachments[attachment].samples;
				is_multisample_first = false;
			} else {
				ERR_FAIL_COND_V_MSG(texture_samples != p_attachments[attachment].samples, RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), if an attachment is marked as multisample, all of them should be multisample and use the same number of samples including the depth.");
			}

		} else {
			subpass.depth_stencil_reference.attachment = RDD::AttachmentReference::UNUSED;
			subpass.depth_stencil_reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
		}

		if (p_vrs_method == VRS_METHOD_FRAGMENT_SHADING_RATE && p_vrs_attachment >= 0) {
			int32_t attachment = p_vrs_attachment;
			ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer VRS format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), VRS attachment.");
			ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer VRS format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it's marked as VRS, but it's not a VRS attachment.");
			ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer VRS attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");

			subpass.fragment_shading_rate_reference.attachment = attachment_remap[attachment];
			subpass.fragment_shading_rate_reference.layout = RDD::TEXTURE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL;
			subpass.fragment_shading_rate_texel_size = p_vrs_texel_size;

			attachment_last_pass[attachment] = i;
		}

		for (int j = 0; j < pass->preserve_attachments.size(); j++) {
			int32_t attachment = pass->preserve_attachments[j];

			ERR_FAIL_COND_V_MSG(attachment == ATTACHMENT_UNUSED, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + "). Preserve attachments can't be unused.");

			ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + ").");

			if (attachment_last_pass[attachment] != i) {
				// Preserve can still be used to keep depth or color from being discarded after use.
				attachment_last_pass[attachment] = i;
				subpasses[i].preserve_attachments.push_back(attachment);
			}
		}

		if (r_samples) {
			r_samples->push_back(texture_samples);
		}

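		// Chain each subpass to the previous one with a conservative dependency:
		// attachment writes from the previous subpass must complete before this
		// subpass performs attachment reads/writes or samples input attachments
		// in the fragment shader.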
		if (i > 0) {
			RDD::SubpassDependency dependency;
			dependency.src_subpass = i - 1;
			dependency.dst_subpass = i;
			dependency.src_stages = (RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
			dependency.dst_stages = (RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
			dependency.src_access = (RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
			dependency.dst_access = (RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT | RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | RDD::BARRIER_ACCESS_INPUT_ATTACHMENT_READ_BIT);
			subpass_dependencies.push_back(dependency);
		}
	}

	RDD::AttachmentReference fragment_density_map_attachment_reference;
	if (p_vrs_method == VRS_METHOD_FRAGMENT_DENSITY_MAP && p_vrs_attachment >= 0) {
		fragment_density_map_attachment_reference.attachment = p_vrs_attachment;
		fragment_density_map_attachment_reference.layout = RDD::TEXTURE_LAYOUT_FRAGMENT_DENSITY_MAP_ATTACHMENT_OPTIMAL;
	}

	RDD::RenderPassID render_pass = p_driver->render_pass_create(attachments, subpasses, subpass_dependencies, p_view_count, fragment_density_map_attachment_reference);
	ERR_FAIL_COND_V(!render_pass, RDD::RenderPassID());

	return render_pass;
}

RDD::RenderPassID RenderingDevice::_render_pass_create_from_graph(RenderingDeviceDriver *p_driver, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, void *p_user_data) {
	DEV_ASSERT(p_driver != nullptr);
	DEV_ASSERT(p_user_data != nullptr);

	// The graph delegates the creation of the render pass to the user according to the load and store ops that were determined as necessary after
	// resolving the dependencies between commands. This function creates a render pass for the framebuffer accordingly.
	Framebuffer *framebuffer = (Framebuffer *)(p_user_data);
	const FramebufferFormatKey &key = framebuffer->rendering_device->framebuffer_formats[framebuffer->format_id].E->key();
	return _render_pass_create(p_driver, key.attachments, key.passes, p_load_ops, p_store_ops, framebuffer->view_count, key.vrs_method, key.vrs_attachment, key.vrs_texel_size);
}

RDG::ResourceUsage RenderingDevice::_vrs_usage_from_method(VRSMethod p_method) {
	switch (p_method) {
		case VRS_METHOD_FRAGMENT_SHADING_RATE:
			return RDG::RESOURCE_USAGE_ATTACHMENT_FRAGMENT_SHADING_RATE_READ;
		case VRS_METHOD_FRAGMENT_DENSITY_MAP:
			return RDG::RESOURCE_USAGE_ATTACHMENT_FRAGMENT_DENSITY_MAP_READ;
		default:
			return RDG::RESOURCE_USAGE_NONE;
	}
}

RDD::PipelineStageBits RenderingDevice::_vrs_stages_from_method(VRSMethod p_method) {
	switch (p_method) {
		case VRS_METHOD_FRAGMENT_SHADING_RATE:
			return RDD::PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT;
		case VRS_METHOD_FRAGMENT_DENSITY_MAP:
			return RDD::PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT;
		default:
			return RDD::PipelineStageBits(0);
	}
}

RDD::TextureLayout RenderingDevice::_vrs_layout_from_method(VRSMethod p_method) {
	switch (p_method) {
		case VRS_METHOD_FRAGMENT_SHADING_RATE:
			return RDD::TEXTURE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL;
		case VRS_METHOD_FRAGMENT_DENSITY_MAP:
			return RDD::TEXTURE_LAYOUT_FRAGMENT_DENSITY_MAP_ATTACHMENT_OPTIMAL;
		default:
			return RDD::TEXTURE_LAYOUT_UNDEFINED;
	}
}

void RenderingDevice::_vrs_detect_method() {
	const RDD::FragmentShadingRateCapabilities &fsr_capabilities = driver->get_fragment_shading_rate_capabilities();
	const RDD::FragmentDensityMapCapabilities &fdm_capabilities = driver->get_fragment_density_map_capabilities();
	if (fsr_capabilities.attachment_supported) {
		vrs_method = VRS_METHOD_FRAGMENT_SHADING_RATE;
	} else if (fdm_capabilities.attachment_supported) {
		vrs_method = VRS_METHOD_FRAGMENT_DENSITY_MAP;
	}

	switch (vrs_method) {
		case VRS_METHOD_FRAGMENT_SHADING_RATE:
			vrs_format = DATA_FORMAT_R8_UINT;
			vrs_texel_size = Vector2i(16, 16).clamp(fsr_capabilities.min_texel_size, fsr_capabilities.max_texel_size);
			break;
		case VRS_METHOD_FRAGMENT_DENSITY_MAP:
			vrs_format = DATA_FORMAT_R8G8_UNORM;
			vrs_texel_size = Vector2i(32, 32).clamp(fdm_capabilities.min_texel_size, fdm_capabilities.max_texel_size);
			break;
		default:
			break;
	}
}

RD::VRSMethod RenderingDevice::vrs_get_method() const {
	return vrs_method;
}

RD::DataFormat RenderingDevice::vrs_get_format() const {
	return vrs_format;
}

Size2i RenderingDevice::vrs_get_texel_size() const {
	return vrs_texel_size;
}

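// Illustrative usage sketch (not part of the engine source): querying the
// detected VRS method before creating a shading-rate/density-map texture.
// Assumes `rd` is a valid RenderingDevice and that VRS_METHOD_NONE is the
// "unsupported" value declared in the VRSMethod enum:
//
//	if (rd->vrs_get_method() != RenderingDevice::VRS_METHOD_NONE) {
//		RenderingDevice::DataFormat format = rd->vrs_get_format(); // R8_UINT for FSR, R8G8_UNORM for FDM.
//		Size2i texel_size = rd->vrs_get_texel_size(); // One VRS texel covers this many screen pixels.
//	}
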
RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create(const Vector<AttachmentFormat> &p_format, uint32_t p_view_count, int32_t p_fragment_density_map_attachment) {
	FramebufferPass pass;
	for (int i = 0; i < p_format.size(); i++) {
		if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			pass.depth_attachment = i;
		} else {
			pass.color_attachments.push_back(i);
		}
	}

	Vector<FramebufferPass> passes;
	passes.push_back(pass);
	return framebuffer_format_create_multipass(p_format, passes, p_view_count, p_fragment_density_map_attachment);
}

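// Illustrative usage sketch (not part of the engine source): describing a format
// with one color and one depth attachment. Assumes `rd` is a valid
// RenderingDevice; the attachment formats must match the textures later bound to
// the framebuffer:
//
//	RenderingDevice::AttachmentFormat color;
//	color.format = RenderingDevice::DATA_FORMAT_R8G8B8A8_UNORM;
//	color.usage_flags = RenderingDevice::TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | RenderingDevice::TEXTURE_USAGE_SAMPLING_BIT;
//
//	RenderingDevice::AttachmentFormat depth;
//	depth.format = RenderingDevice::DATA_FORMAT_D32_SFLOAT;
//	depth.usage_flags = RenderingDevice::TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
//
//	Vector<RenderingDevice::AttachmentFormat> attachments = { color, depth };
//	RenderingDevice::FramebufferFormatID fb_format = rd->framebuffer_format_create(attachments, 1, -1);
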
RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count, int32_t p_vrs_attachment) {
	_THREAD_SAFE_METHOD_

	FramebufferFormatKey key;
	key.attachments = p_attachments;
	key.passes = p_passes;
	key.view_count = p_view_count;
	key.vrs_method = vrs_method;
	key.vrs_attachment = p_vrs_attachment;
	key.vrs_texel_size = vrs_texel_size;

	const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key);
	if (E) {
		// Exists, return.
		return E->get();
	}

	Vector<TextureSamples> samples;
	LocalVector<RDD::AttachmentLoadOp> load_ops;
	LocalVector<RDD::AttachmentStoreOp> store_ops;
	for (int64_t i = 0; i < p_attachments.size(); i++) {
		load_ops.push_back(RDD::ATTACHMENT_LOAD_OP_CLEAR);
		store_ops.push_back(RDD::ATTACHMENT_STORE_OP_STORE);
	}

	RDD::RenderPassID render_pass = _render_pass_create(driver, p_attachments, p_passes, load_ops, store_ops, p_view_count, vrs_method, p_vrs_attachment, vrs_texel_size, &samples); // Actions don't matter for this use case.
	if (!render_pass) { // Was likely invalid.
		return INVALID_ID;
	}

	FramebufferFormatID id = FramebufferFormatID(framebuffer_format_cache.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));
	E = framebuffer_format_cache.insert(key, id);

	FramebufferFormat fb_format;
	fb_format.E = E;
	fb_format.render_pass = render_pass;
	fb_format.pass_samples = samples;
	fb_format.view_count = p_view_count;
	framebuffer_formats[id] = fb_format;

#if PRINT_FRAMEBUFFER_FORMAT
	print_line("FRAMEBUFFER FORMAT:", id, "ATTACHMENTS:", p_attachments.size(), "PASSES:", p_passes.size());
	for (RD::AttachmentFormat attachment : p_attachments) {
		print_line("FORMAT:", attachment.format, "SAMPLES:", attachment.samples, "USAGE FLAGS:", attachment.usage_flags);
	}
#endif

	return id;
}

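// Illustrative usage sketch (not part of the engine source): a two-subpass format
// where the second subpass reads the first color attachment as an input
// attachment. Assumes `rd` is a valid RenderingDevice and `attachments` is a
// Vector<RenderingDevice::AttachmentFormat> whose entry 0 also has
// TEXTURE_USAGE_INPUT_ATTACHMENT_BIT set and whose entry 1 is the final target:
//
//	RenderingDevice::FramebufferPass first_pass;
//	first_pass.color_attachments.push_back(0);
//
//	RenderingDevice::FramebufferPass second_pass;
//	second_pass.input_attachments.push_back(0);
//	second_pass.color_attachments.push_back(1);
//
//	Vector<RenderingDevice::FramebufferPass> passes = { first_pass, second_pass };
//	RenderingDevice::FramebufferFormatID fb_format = rd->framebuffer_format_create_multipass(attachments, passes, 1, -1);
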
RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create_empty(TextureSamples p_samples) {
	_THREAD_SAFE_METHOD_

	FramebufferFormatKey key;
	key.passes.push_back(FramebufferPass());

	const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key);
	if (E) {
		// Exists, return.
		return E->get();
	}

	LocalVector<RDD::Subpass> subpass;
	subpass.resize(1);

	RDD::RenderPassID render_pass = driver->render_pass_create({}, subpass, {}, 1, RDD::AttachmentReference());
	ERR_FAIL_COND_V(!render_pass, FramebufferFormatID());

	FramebufferFormatID id = FramebufferFormatID(framebuffer_format_cache.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));

	E = framebuffer_format_cache.insert(key, id);

	FramebufferFormat fb_format;
	fb_format.E = E;
	fb_format.render_pass = render_pass;
	fb_format.pass_samples.push_back(p_samples);
	framebuffer_formats[id] = fb_format;

#if PRINT_FRAMEBUFFER_FORMAT
	print_line("FRAMEBUFFER FORMAT:", id, "ATTACHMENTS: EMPTY");
#endif

	return id;
}

RenderingDevice::TextureSamples RenderingDevice::framebuffer_format_get_texture_samples(FramebufferFormatID p_format, uint32_t p_pass) {
	_THREAD_SAFE_METHOD_

	HashMap<FramebufferFormatID, FramebufferFormat>::Iterator E = framebuffer_formats.find(p_format);
	ERR_FAIL_COND_V(!E, TEXTURE_SAMPLES_1);
	ERR_FAIL_COND_V(p_pass >= uint32_t(E->value.pass_samples.size()), TEXTURE_SAMPLES_1);

	return E->value.pass_samples[p_pass];
}

RID RenderingDevice::framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples, FramebufferFormatID p_format_check) {
	_THREAD_SAFE_METHOD_

	Framebuffer framebuffer;
	framebuffer.rendering_device = this;
	framebuffer.format_id = framebuffer_format_create_empty(p_samples);
	ERR_FAIL_COND_V(p_format_check != INVALID_FORMAT_ID && framebuffer.format_id != p_format_check, RID());
	framebuffer.size = p_size;
	framebuffer.view_count = 1;

	RDG::FramebufferCache *framebuffer_cache = RDG::framebuffer_cache_create();
	framebuffer_cache->width = p_size.width;
	framebuffer_cache->height = p_size.height;
	framebuffer.framebuffer_cache = framebuffer_cache;

	RID id = framebuffer_owner.make_rid(framebuffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	framebuffer_cache->render_pass_creation_user_data = framebuffer_owner.get_or_null(id);

	return id;
}

RID RenderingDevice::framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	FramebufferPass pass;

	for (int i = 0; i < p_texture_attachments.size(); i++) {
		Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);

		ERR_FAIL_COND_V_MSG(texture && texture->layers != p_view_count, RID(), "Number of layers in the texture doesn't match the view count for this framebuffer.");

		if (texture != nullptr) {
			_check_transfer_worker_texture(texture);
		}

		if (texture && texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			pass.depth_attachment = i;
		} else if (texture && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
			// Prevent the VRS attachment from being added to the color_attachments.
		} else {
			if (texture && texture->is_resolve_buffer) {
				pass.resolve_attachments.push_back(i);
			} else {
				pass.color_attachments.push_back(texture ? i : ATTACHMENT_UNUSED);
			}
		}
	}

	Vector<FramebufferPass> passes;
	passes.push_back(pass);

	return framebuffer_create_multipass(p_texture_attachments, passes, p_format_check, p_view_count);
}

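// Illustrative usage sketch (not part of the engine source): creating a
// framebuffer from a color and a depth texture. Assumes `rd` is a valid
// RenderingDevice and `color_tex`/`depth_tex` were created with the matching
// attachment usage bits:
//
//	Vector<RID> fb_textures = { color_tex, depth_tex };
//	RID framebuffer = rd->framebuffer_create(fb_textures, RenderingDevice::INVALID_FORMAT_ID, 1);
//	ERR_FAIL_COND(framebuffer.is_null());
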
RID RenderingDevice::framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	Vector<AttachmentFormat> attachments;
	LocalVector<RDD::TextureID> textures;
	LocalVector<RDG::ResourceTracker *> trackers;
	int32_t vrs_attachment = -1;
	attachments.resize(p_texture_attachments.size());
	Size2i size;
	bool size_set = false;
	for (int i = 0; i < p_texture_attachments.size(); i++) {
		AttachmentFormat af;
		Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);
		if (!texture) {
			af.usage_flags = AttachmentFormat::UNUSED_ATTACHMENT;
			trackers.push_back(nullptr);
		} else {
			ERR_FAIL_COND_V_MSG(texture->layers != p_view_count, RID(), "Number of layers in the texture doesn't match the view count for this framebuffer.");

			_check_transfer_worker_texture(texture);

			if (i != 0 && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
				// Detect if the texture is the fragment density map and it's not the first attachment.
				vrs_attachment = i;
			}

			if (!size_set) {
				size.width = texture->width;
				size.height = texture->height;
				size_set = true;
			} else if (texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
				// If this is not the first attachment we assume this is used as the VRS attachment.
				// In this case this texture will be 1/16th the size of the color attachment.
				// So we skip the size check.
			} else {
				ERR_FAIL_COND_V_MSG((uint32_t)size.width != texture->width || (uint32_t)size.height != texture->height, RID(),
						"All textures in a framebuffer should be the same size.");
			}

			af.format = texture->format;
			af.samples = texture->samples;
			af.usage_flags = texture->usage_flags;

			_texture_make_mutable(texture, p_texture_attachments[i]);

			textures.push_back(texture->driver_id);
			trackers.push_back(texture->draw_tracker);
		}
		attachments.write[i] = af;
	}

	ERR_FAIL_COND_V_MSG(!size_set, RID(), "All attachments unused.");

	FramebufferFormatID format_id = framebuffer_format_create_multipass(attachments, p_passes, p_view_count, vrs_attachment);
	if (format_id == INVALID_ID) {
		return RID();
	}

	ERR_FAIL_COND_V_MSG(p_format_check != INVALID_ID && format_id != p_format_check, RID(),
			"The format used to check this framebuffer differs from the intended framebuffer format.");

	Framebuffer framebuffer;
	framebuffer.rendering_device = this;
	framebuffer.format_id = format_id;
	framebuffer.texture_ids = p_texture_attachments;
	framebuffer.size = size;
	framebuffer.view_count = p_view_count;

	RDG::FramebufferCache *framebuffer_cache = RDG::framebuffer_cache_create();
	framebuffer_cache->width = size.width;
	framebuffer_cache->height = size.height;
	framebuffer_cache->textures = textures;
	framebuffer_cache->trackers = trackers;
	framebuffer.framebuffer_cache = framebuffer_cache;

	RID id = framebuffer_owner.make_rid(framebuffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	for (int i = 0; i < p_texture_attachments.size(); i++) {
		if (p_texture_attachments[i].is_valid()) {
			_add_dependency(id, p_texture_attachments[i]);
		}
	}

	framebuffer_cache->render_pass_creation_user_data = framebuffer_owner.get_or_null(id);

	return id;
}

RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_get_format(RID p_framebuffer) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, INVALID_ID);

	return framebuffer->format_id;
}

Size2 RenderingDevice::framebuffer_get_size(RID p_framebuffer) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, Size2(0, 0));

	return framebuffer->size;
}

bool RenderingDevice::framebuffer_is_valid(RID p_framebuffer) const {
	_THREAD_SAFE_METHOD_

	return framebuffer_owner.owns(p_framebuffer);
}

void RenderingDevice::framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL(framebuffer);

	framebuffer->invalidated_callback = p_callback;
	framebuffer->invalidated_callback_userdata = p_userdata;
}

/*****************/
/**** SAMPLER ****/
/*****************/

RID RenderingDevice::sampler_create(const SamplerState &p_state) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_INDEX_V(p_state.repeat_u, SAMPLER_REPEAT_MODE_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.repeat_v, SAMPLER_REPEAT_MODE_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.repeat_w, SAMPLER_REPEAT_MODE_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.compare_op, COMPARE_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.border_color, SAMPLER_BORDER_COLOR_MAX, RID());

	RDD::SamplerID sampler = driver->sampler_create(p_state);
	ERR_FAIL_COND_V(!sampler, RID());

	RID id = sampler_owner.make_rid(sampler);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

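// Illustrative usage sketch (not part of the engine source): creating a linear
// sampler with repeat wrapping. Assumes `rd` is a valid RenderingDevice; the
// SamplerState fields used here are the ones declared in rendering_device.h:
//
//	RenderingDevice::SamplerState state;
//	state.mag_filter = RenderingDevice::SAMPLER_FILTER_LINEAR;
//	state.min_filter = RenderingDevice::SAMPLER_FILTER_LINEAR;
//	state.repeat_u = RenderingDevice::SAMPLER_REPEAT_MODE_REPEAT;
//	state.repeat_v = RenderingDevice::SAMPLER_REPEAT_MODE_REPEAT;
//	RID sampler = rd->sampler_create(state);
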
bool RenderingDevice::sampler_is_format_supported_for_filter(DataFormat p_format, SamplerFilter p_sampler_filter) const {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);

	return driver->sampler_is_format_supported_for_filter(p_format, p_sampler_filter);
}

/***********************/
/**** VERTEX BUFFER ****/
/***********************/

RID RenderingDevice::vertex_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data, BitField<BufferCreationBits> p_creation_bits) {
	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	buffer.size = p_size_bytes;
	buffer.usage = RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_VERTEX_BIT;
	if (p_creation_bits.has_flag(BUFFER_CREATION_AS_STORAGE_BIT)) {
		buffer.usage.set_flag(RDD::BUFFER_USAGE_STORAGE_BIT);
	}
	if (p_creation_bits.has_flag(BUFFER_CREATION_DEVICE_ADDRESS_BIT)) {
		buffer.usage.set_flag(RDD::BUFFER_USAGE_DEVICE_ADDRESS_BIT);
	}
	buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!buffer.driver_id, RID());

	// Vertex buffers are assumed to be immutable unless they don't have initial data or they've been marked for storage explicitly.
	if (p_data.is_empty() || p_creation_bits.has_flag(BUFFER_CREATION_AS_STORAGE_BIT)) {
		buffer.draw_tracker = RDG::resource_tracker_create();
		buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
	}

	if (p_data.size()) {
		_buffer_initialize(&buffer, p_data);
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = vertex_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

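// Illustrative usage sketch (not part of the engine source): uploading three 2D
// positions as a vertex buffer. Assumes `rd` is a valid RenderingDevice and that
// Span can be constructed from a pointer and a byte count:
//
//	float positions[] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
//	RID vertex_buffer = rd->vertex_buffer_create(sizeof(positions), Span<uint8_t>((const uint8_t *)positions, sizeof(positions)));
//	ERR_FAIL_COND(vertex_buffer.is_null());
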
// Internally reference counted. This ID is guaranteed to be unique for the same description, but it needs to be freed as many times as it was allocated.
RenderingDevice::VertexFormatID RenderingDevice::vertex_format_create(const Vector<VertexAttribute> &p_vertex_descriptions) {
	_THREAD_SAFE_METHOD_

	VertexDescriptionKey key;
	key.vertex_formats = p_vertex_descriptions;

	VertexFormatID *idptr = vertex_format_cache.getptr(key);
	if (idptr) {
		return *idptr;
	}

	HashSet<int> used_locations;
	for (int i = 0; i < p_vertex_descriptions.size(); i++) {
		ERR_CONTINUE(p_vertex_descriptions[i].format >= DATA_FORMAT_MAX);
		ERR_FAIL_COND_V(used_locations.has(p_vertex_descriptions[i].location), INVALID_ID);

		ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_descriptions[i].format) == 0, INVALID_ID,
				"Data format for attachment (" + itos(i) + "), '" + FORMAT_NAMES[p_vertex_descriptions[i].format] + "', is not valid for a vertex array.");

		used_locations.insert(p_vertex_descriptions[i].location);
	}

	RDD::VertexFormatID driver_id = driver->vertex_format_create(p_vertex_descriptions);
	ERR_FAIL_COND_V(!driver_id, 0);

	VertexFormatID id = (vertex_format_cache.size() | ((int64_t)ID_TYPE_VERTEX_FORMAT << ID_BASE_SHIFT));
	vertex_format_cache[key] = id;
	vertex_formats[id].vertex_formats = p_vertex_descriptions;
	vertex_formats[id].driver_id = driver_id;
	return id;
}

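// Illustrative usage sketch (not part of the engine source): describing a vertex
// layout with interleaved position (vec3) and UV (vec2) attributes. Assumes `rd`
// is a valid RenderingDevice:
//
//	RenderingDevice::VertexAttribute position;
//	position.location = 0;
//	position.offset = 0;
//	position.format = RenderingDevice::DATA_FORMAT_R32G32B32_SFLOAT;
//	position.stride = 5 * sizeof(float);
//
//	RenderingDevice::VertexAttribute uv;
//	uv.location = 1;
//	uv.offset = 3 * sizeof(float);
//	uv.format = RenderingDevice::DATA_FORMAT_R32G32_SFLOAT;
//	uv.stride = 5 * sizeof(float);
//
//	Vector<RenderingDevice::VertexAttribute> layout = { position, uv };
//	RenderingDevice::VertexFormatID vertex_format = rd->vertex_format_create(layout);
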
RID RenderingDevice::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets) {
3179
_THREAD_SAFE_METHOD_
3180
3181
ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
3182
const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
3183
3184
ERR_FAIL_COND_V(vd.vertex_formats.size() != p_src_buffers.size(), RID());
3185
3186
for (int i = 0; i < p_src_buffers.size(); i++) {
3187
ERR_FAIL_COND_V(!vertex_buffer_owner.owns(p_src_buffers[i]), RID());
3188
}
3189
3190
VertexArray vertex_array;
3191
3192
if (p_offsets.is_empty()) {
3193
vertex_array.offsets.resize_initialized(p_src_buffers.size());
3194
} else {
3195
ERR_FAIL_COND_V(p_offsets.size() != p_src_buffers.size(), RID());
3196
vertex_array.offsets = p_offsets;
3197
}
3198
3199
vertex_array.vertex_count = p_vertex_count;
3200
vertex_array.description = p_vertex_format;
3201
vertex_array.max_instances_allowed = 0xFFFFFFFF; // By default as many as you want.
3202
for (int i = 0; i < p_src_buffers.size(); i++) {
3203
Buffer *buffer = vertex_buffer_owner.get_or_null(p_src_buffers[i]);
3204
3205
// Validate with buffer.
3206
{
3207
const VertexAttribute &atf = vd.vertex_formats[i];
3208
3209
uint32_t element_size = get_format_vertex_size(atf.format);
3210
ERR_FAIL_COND_V(element_size == 0, RID()); // Should never happens since this was prevalidated.
3211
3212
if (atf.frequency == VERTEX_FREQUENCY_VERTEX) {
3213
// Validate size for regular drawing.
3214
uint64_t total_size = uint64_t(atf.stride) * (p_vertex_count - 1) + atf.offset + element_size;
3215
ERR_FAIL_COND_V_MSG(total_size > buffer->size, RID(),
3216
"Attachment (" + itos(i) + ") will read past the end of the buffer.");
3217
3218
} else {
3219
// Validate size for instances drawing.
3220
uint64_t available = buffer->size - atf.offset;
3221
ERR_FAIL_COND_V_MSG(available < element_size, RID(),
3222
"Attachment (" + itos(i) + ") uses instancing, but it's just too small.");
3223
3224
uint32_t instances_allowed = available / atf.stride;
3225
vertex_array.max_instances_allowed = MIN(instances_allowed, vertex_array.max_instances_allowed);
3226
}
3227
}
3228
3229
vertex_array.buffers.push_back(buffer->driver_id);
3230
3231
if (buffer->draw_tracker != nullptr) {
3232
vertex_array.draw_trackers.push_back(buffer->draw_tracker);
3233
} else {
3234
vertex_array.untracked_buffers.insert(p_src_buffers[i]);
3235
}
3236
3237
if (buffer->transfer_worker_index >= 0) {
3238
vertex_array.transfer_worker_indices.push_back(buffer->transfer_worker_index);
3239
vertex_array.transfer_worker_operations.push_back(buffer->transfer_worker_operation);
3240
}
3241
}
3242
3243
RID id = vertex_array_owner.make_rid(vertex_array);
3244
for (int i = 0; i < p_src_buffers.size(); i++) {
3245
_add_dependency(id, p_src_buffers[i]);
3246
}
3247
3248
return id;
3249
}
3250
3251
RID RenderingDevice::index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, Span<uint8_t> p_data, bool p_use_restart_indices, BitField<BufferCreationBits> p_creation_bits) {
	ERR_FAIL_COND_V(p_index_count == 0, RID());

	IndexBuffer index_buffer;
	index_buffer.format = p_format;
	index_buffer.supports_restart_indices = p_use_restart_indices;
	index_buffer.index_count = p_index_count;
	uint32_t size_bytes = p_index_count * ((p_format == INDEX_BUFFER_FORMAT_UINT16) ? 2 : 4);
#ifdef DEBUG_ENABLED
	if (p_data.size()) {
		index_buffer.max_index = 0;
		ERR_FAIL_COND_V_MSG((uint32_t)p_data.size() != size_bytes, RID(),
				"Default index buffer initializer array size (" + itos(p_data.size()) + ") does not match format required size (" + itos(size_bytes) + ").");
		const uint8_t *r = p_data.ptr();
		if (p_format == INDEX_BUFFER_FORMAT_UINT16) {
			const uint16_t *index16 = (const uint16_t *)r;
			for (uint32_t i = 0; i < p_index_count; i++) {
				if (p_use_restart_indices && index16[i] == 0xFFFF) {
					continue; // Restart index, ignore.
				}
				index_buffer.max_index = MAX(index16[i], index_buffer.max_index);
			}
		} else {
			const uint32_t *index32 = (const uint32_t *)r;
			for (uint32_t i = 0; i < p_index_count; i++) {
				if (p_use_restart_indices && index32[i] == 0xFFFFFFFF) {
					continue; // Restart index, ignore.
				}
				index_buffer.max_index = MAX(index32[i], index_buffer.max_index);
			}
		}
	} else {
		index_buffer.max_index = 0xFFFFFFFF;
	}
#else
	index_buffer.max_index = 0xFFFFFFFF;
#endif
	index_buffer.size = size_bytes;
	index_buffer.usage = (RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_INDEX_BIT);
	if (p_creation_bits.has_flag(BUFFER_CREATION_DEVICE_ADDRESS_BIT)) {
		index_buffer.usage.set_flag(RDD::BUFFER_USAGE_DEVICE_ADDRESS_BIT);
	}
	index_buffer.driver_id = driver->buffer_create(index_buffer.size, index_buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!index_buffer.driver_id, RID());

	// Index buffers are assumed to be immutable unless they don't have initial data.
	if (p_data.is_empty()) {
		index_buffer.draw_tracker = RDG::resource_tracker_create();
		index_buffer.draw_tracker->buffer_driver_id = index_buffer.driver_id;
	}

	if (p_data.size()) {
		_buffer_initialize(&index_buffer, p_data);
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += index_buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = index_buffer_owner.make_rid(index_buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

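// Illustrative usage sketch (not part of the engine source): uploading a 16-bit
// index buffer for two triangles and creating an index array over all of it.
// Assumes `rd` is a valid RenderingDevice and that Span can be constructed from
// a pointer and a byte count:
//
//	uint16_t indices[] = { 0, 1, 2, 2, 1, 3 };
//	RID index_buffer = rd->index_buffer_create(6, RenderingDevice::INDEX_BUFFER_FORMAT_UINT16,
//			Span<uint8_t>((const uint8_t *)indices, sizeof(indices)));
//	RID index_array = rd->index_array_create(index_buffer, 0, 6);
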
RID RenderingDevice::index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(!index_buffer_owner.owns(p_index_buffer), RID());

	IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_index_buffer);

	ERR_FAIL_COND_V(p_index_count == 0, RID());
	ERR_FAIL_COND_V(p_index_offset + p_index_count > index_buffer->index_count, RID());

	IndexArray index_array;
	index_array.max_index = index_buffer->max_index;
	index_array.driver_id = index_buffer->driver_id;
	index_array.draw_tracker = index_buffer->draw_tracker;
	index_array.offset = p_index_offset;
	index_array.indices = p_index_count;
	index_array.format = index_buffer->format;
	index_array.supports_restart_indices = index_buffer->supports_restart_indices;
	index_array.transfer_worker_index = index_buffer->transfer_worker_index;
	index_array.transfer_worker_operation = index_buffer->transfer_worker_operation;

	RID id = index_array_owner.make_rid(index_array);
	_add_dependency(id, p_index_buffer);
	return id;
}

/****************/
/**** SHADER ****/
/****************/

static const char *SHADER_UNIFORM_NAMES[RenderingDevice::UNIFORM_TYPE_MAX] = {
	"Sampler", "CombinedSampler", "Texture", "Image", "TextureBuffer", "SamplerTextureBuffer", "ImageBuffer", "UniformBuffer", "StorageBuffer", "InputAttachment"
};

String RenderingDevice::_shader_uniform_debug(RID p_shader, int p_set) {
	String ret;
	const Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, String());
	for (int i = 0; i < shader->uniform_sets.size(); i++) {
		if (p_set >= 0 && i != p_set) {
			continue;
		}
		for (int j = 0; j < shader->uniform_sets[i].size(); j++) {
			const ShaderUniform &ui = shader->uniform_sets[i][j];
			if (!ret.is_empty()) {
				ret += "\n";
			}
			ret += "Set: " + itos(i) + " Binding: " + itos(ui.binding) + " Type: " + SHADER_UNIFORM_NAMES[ui.type] + " Writable: " + (ui.writable ? "Y" : "N") + " Length: " + itos(ui.length);
		}
	}
	return ret;
}

Vector<uint8_t> RenderingDevice::shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
	const RenderingShaderContainerFormat &container_format = driver->get_shader_container_format();
	Ref<RenderingShaderContainer> shader_container = container_format.create_container();
	ERR_FAIL_COND_V(shader_container.is_null(), Vector<uint8_t>());

	// Compile shader binary from SPIR-V.
	bool code_compiled = shader_container->set_code_from_spirv(p_shader_name, p_spirv);
	ERR_FAIL_COND_V_MSG(!code_compiled, Vector<uint8_t>(), "Failed to compile native shader code from SPIR-V.");

	return shader_container->to_bytes();
}

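// Illustrative usage sketch (not part of the engine source): turning compiled
// SPIR-V stages into a device-specific binary and instantiating a shader from
// it. Assumes `rd` is a valid RenderingDevice and `vertex_spirv`/`fragment_spirv`
// hold valid SPIR-V bytecode:
//
//	Vector<RenderingDevice::ShaderStageSPIRVData> stages;
//	RenderingDevice::ShaderStageSPIRVData vertex_stage;
//	vertex_stage.shader_stage = RenderingDevice::SHADER_STAGE_VERTEX;
//	vertex_stage.spirv = vertex_spirv;
//	stages.push_back(vertex_stage);
//	RenderingDevice::ShaderStageSPIRVData fragment_stage;
//	fragment_stage.shader_stage = RenderingDevice::SHADER_STAGE_FRAGMENT;
//	fragment_stage.spirv = fragment_spirv;
//	stages.push_back(fragment_stage);
//
//	Vector<uint8_t> binary = rd->shader_compile_binary_from_spirv(stages, "my_shader");
//	RID shader = rd->shader_create_from_bytecode(binary);
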
RID RenderingDevice::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder) {
	// Immutable samplers: expanding the API to optionally allow passing a set of
	// immutable samplers when creating a shader, while keeping the existing API
	// intact by sending an empty set.
	Vector<PipelineImmutableSampler> immutable_samplers;
	return shader_create_from_bytecode_with_samplers(p_shader_binary, p_placeholder, immutable_samplers);
}

RID RenderingDevice::shader_create_from_bytecode_with_samplers(const Vector<uint8_t> &p_shader_binary, RID p_placeholder, const Vector<PipelineImmutableSampler> &p_immutable_samplers) {
	_THREAD_SAFE_METHOD_

	Ref<RenderingShaderContainer> shader_container = driver->get_shader_container_format().create_container();
	ERR_FAIL_COND_V(shader_container.is_null(), RID());

	bool parsed_container = shader_container->from_bytes(p_shader_binary);
	ERR_FAIL_COND_V_MSG(!parsed_container, RID(), "Failed to parse shader container from binary.");

	Vector<RDD::ImmutableSampler> driver_immutable_samplers;
	for (const PipelineImmutableSampler &source_sampler : p_immutable_samplers) {
		RDD::ImmutableSampler driver_sampler;
		driver_sampler.type = source_sampler.uniform_type;
		driver_sampler.binding = source_sampler.binding;

		for (uint32_t j = 0; j < source_sampler.get_id_count(); j++) {
			RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(source_sampler.get_id(j));
			driver_sampler.ids.push_back(*sampler_driver_id);
		}

		driver_immutable_samplers.append(driver_sampler);
	}

	RDD::ShaderID shader_id = driver->shader_create_from_container(shader_container, driver_immutable_samplers);
	ERR_FAIL_COND_V(!shader_id, RID());

	// All good, let's create modules.

	RID id;
	if (p_placeholder.is_null()) {
		id = shader_owner.make_rid();
	} else {
		id = p_placeholder;
	}

	Shader *shader = shader_owner.get_or_null(id);
	ERR_FAIL_NULL_V(shader, RID());

	*((ShaderReflection *)shader) = shader_container->get_shader_reflection();
	shader->name.clear();
	shader->name.append_utf8(shader_container->shader_name);
	shader->driver_id = shader_id;
	shader->layout_hash = driver->shader_get_layout_hash(shader_id);

	for (int i = 0; i < shader->uniform_sets.size(); i++) {
		uint32_t format = 0; // No format, default.

		if (shader->uniform_sets[i].size()) {
			// Sort and hash.

			shader->uniform_sets.write[i].sort();

			UniformSetFormat usformat;
			usformat.uniforms = shader->uniform_sets[i];
			RBMap<UniformSetFormat, uint32_t>::Element *E = uniform_set_format_cache.find(usformat);
			if (E) {
				format = E->get();
			} else {
				format = uniform_set_format_cache.size() + 1;
				uniform_set_format_cache.insert(usformat, format);
			}
		}

		shader->set_formats.push_back(format);
	}

	for (ShaderStage stage : shader->stages_vector) {
		switch (stage) {
			case SHADER_STAGE_VERTEX:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
				break;
			case SHADER_STAGE_FRAGMENT:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
				break;
			case SHADER_STAGE_TESSELATION_CONTROL:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
				break;
			case SHADER_STAGE_TESSELATION_EVALUATION:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT);
				break;
			case SHADER_STAGE_COMPUTE:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
				break;
			default:
				DEV_ASSERT(false && "Unknown shader stage.");
				break;
		}
	}

#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

void RenderingDevice::shader_destroy_modules(RID p_shader) {
	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL(shader);
	driver->shader_destroy_modules(shader->driver_id);
}

RID RenderingDevice::shader_create_placeholder() {
	_THREAD_SAFE_METHOD_

	Shader shader;
	return shader_owner.make_rid(shader);
}

uint64_t RenderingDevice::shader_get_vertex_input_attribute_mask(RID p_shader) {
	_THREAD_SAFE_METHOD_

	const Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, 0);
	return shader->vertex_input_mask;
}

/******************/
/**** UNIFORMS ****/
/******************/

RID RenderingDevice::uniform_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data, BitField<BufferCreationBits> p_creation_bits) {
	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	buffer.size = p_size_bytes;
	buffer.usage = (RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_UNIFORM_BIT);
	if (p_creation_bits.has_flag(BUFFER_CREATION_DEVICE_ADDRESS_BIT)) {
		buffer.usage.set_flag(RDD::BUFFER_USAGE_DEVICE_ADDRESS_BIT);
	}
	buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!buffer.driver_id, RID());

	// Uniform buffers are assumed to be immutable unless they don't have initial data.
	if (p_data.is_empty()) {
		buffer.draw_tracker = RDG::resource_tracker_create();
		buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
	}

	if (p_data.size()) {
		_buffer_initialize(&buffer, p_data);
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = uniform_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

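// Illustrative usage sketch (not part of the engine source): creating a uniform
// buffer, wrapping it in a uniform, and building a uniform set for binding 0 of
// set 0. Assumes `rd` is a valid RenderingDevice, `shader` is a shader RID whose
// set 0 declares a uniform buffer at binding 0, and `params` is a std140-packed
// struct:
//
//	RID ubo = rd->uniform_buffer_create(sizeof(params), Span<uint8_t>((const uint8_t *)&params, sizeof(params)));
//
//	RenderingDevice::Uniform uniform;
//	uniform.uniform_type = RenderingDevice::UNIFORM_TYPE_UNIFORM_BUFFER;
//	uniform.binding = 0;
//	uniform.append_id(ubo);
//
//	Vector<RenderingDevice::Uniform> uniforms;
//	uniforms.push_back(uniform);
//	RID uniform_set = rd->uniform_set_create(uniforms, shader, 0);
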
void RenderingDevice::_uniform_set_update_shared(UniformSet *p_uniform_set) {
	for (UniformSet::SharedTexture shared : p_uniform_set->shared_textures_to_update) {
		Texture *texture = texture_owner.get_or_null(shared.texture);
		ERR_CONTINUE(texture == nullptr);
		_texture_update_shared_fallback(shared.texture, texture, shared.writing);
	}
}

RID RenderingDevice::uniform_set_create(const VectorView<RD::Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set, bool p_linear_pool) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(p_uniforms.size() == 0, RID());

	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, RID());

	ERR_FAIL_COND_V_MSG(p_shader_set >= (uint32_t)shader->uniform_sets.size() || shader->uniform_sets[p_shader_set].is_empty(), RID(),
			"Desired set (" + itos(p_shader_set) + ") not used by shader.");
	// See that all sets in shader are satisfied.

	const Vector<ShaderUniform> &set = shader->uniform_sets[p_shader_set];

	uint32_t uniform_count = p_uniforms.size();
	const Uniform *uniforms = p_uniforms.ptr();

	uint32_t set_uniform_count = set.size();
	const ShaderUniform *set_uniforms = set.ptr();

	LocalVector<RDD::BoundUniform> driver_uniforms;
	driver_uniforms.resize(set_uniform_count);

	// Used for verification to make sure a uniform set does not use a framebuffer bound texture.
	LocalVector<UniformSet::AttachableTexture> attachable_textures;
	Vector<RDG::ResourceTracker *> draw_trackers;
	Vector<RDG::ResourceUsage> draw_trackers_usage;
	HashMap<RID, RDG::ResourceUsage> untracked_usage;
	Vector<UniformSet::SharedTexture> shared_textures_to_update;

	for (uint32_t i = 0; i < set_uniform_count; i++) {
		const ShaderUniform &set_uniform = set_uniforms[i];
		int uniform_idx = -1;
		for (int j = 0; j < (int)uniform_count; j++) {
			if (uniforms[j].binding == set_uniform.binding) {
				uniform_idx = j;
				break;
			}
		}
		ERR_FAIL_COND_V_MSG(uniform_idx == -1, RID(),
				"All the shader bindings for the given set must be covered by the uniforms provided. Binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + ") was not provided.");

		const Uniform &uniform = uniforms[uniform_idx];

		ERR_FAIL_INDEX_V(uniform.uniform_type, RD::UNIFORM_TYPE_MAX, RID());
		ERR_FAIL_COND_V_MSG(uniform.uniform_type != set_uniform.type, RID(),
				"Mismatched uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + SHADER_UNIFORM_NAMES[set_uniform.type] + "', supplied: '" + SHADER_UNIFORM_NAMES[uniform.uniform_type] + "'.");

		RDD::BoundUniform &driver_uniform = driver_uniforms[i];
		driver_uniform.type = uniform.uniform_type;
		driver_uniform.binding = uniform.binding;

		// Mark immutable samplers to be skipped when creating uniform set.
		driver_uniform.immutable_sampler = uniform.immutable_sampler;

		switch (uniform.uniform_type) {
			case UNIFORM_TYPE_SAMPLER: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler elements, so it should be provided an equal number of sampler IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") should provide one ID referencing a sampler (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j));
					ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "Sampler (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					driver_uniform.ids.push_back(*sampler_driver_id);
				}
			} break;
			case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length * 2) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler&texture elements, so it should be provided twice the number of IDs (sampler,texture pairs) to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j += 2) {
					RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j + 0));
					ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					RID texture_id = uniform.get_id(j + 1);
					Texture *texture = texture_owner.get_or_null(texture_id);
					ERR_FAIL_NULL_V_MSG(texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");

					if ((texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT))) {
						UniformSet::AttachableTexture attachable_texture;
						attachable_texture.bind = set_uniform.binding;
						attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.get_id(j + 1);
						attachable_textures.push_back(attachable_texture);
					}

					RDD::TextureID driver_id = texture->driver_id;
					RDG::ResourceTracker *tracker = texture->draw_tracker;
					if (texture->shared_fallback != nullptr && texture->shared_fallback->texture.id != 0) {
						driver_id = texture->shared_fallback->texture;
						tracker = texture->shared_fallback->texture_tracker;
						shared_textures_to_update.push_back({ false, texture_id });
					}

					if (tracker != nullptr) {
						draw_trackers.push_back(tracker);
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_SAMPLE);
					} else {
						untracked_usage[texture_id] = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));

					driver_uniform.ids.push_back(*sampler_driver_id);
					driver_uniform.ids.push_back(driver_id);
					_check_transfer_worker_texture(texture);
				}
			} break;
			case UNIFORM_TYPE_TEXTURE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so it should be provided an equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID texture_id = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(texture_id);
					ERR_FAIL_NULL_V_MSG(texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");

					if ((texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT))) {
						UniformSet::AttachableTexture attachable_texture;
						attachable_texture.bind = set_uniform.binding;
						attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.get_id(j);
						attachable_textures.push_back(attachable_texture);
					}

					RDD::TextureID driver_id = texture->driver_id;
					RDG::ResourceTracker *tracker = texture->draw_tracker;
					if (texture->shared_fallback != nullptr && texture->shared_fallback->texture.id != 0) {
						driver_id = texture->shared_fallback->texture;
						tracker = texture->shared_fallback->texture_tracker;
						shared_textures_to_update.push_back({ false, texture_id });
					}

					if (tracker != nullptr) {
						draw_trackers.push_back(tracker);
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_SAMPLE);
					} else {
						untracked_usage[texture_id] = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));

					driver_uniform.ids.push_back(driver_id);
					_check_transfer_worker_texture(texture);
				}
			} break;
			case UNIFORM_TYPE_IMAGE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so it should be provided an equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID texture_id = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(texture_id);

					ERR_FAIL_NULL_V_MSG(texture, RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_STORAGE_BIT usage flag set in order to be used as uniform.");

					if (texture->owner.is_null() && texture->shared_fallback != nullptr) {
						shared_textures_to_update.push_back({ true, texture_id });
					}

					if (_texture_make_mutable(texture, texture_id)) {
						// The texture must be mutable as a layout transition will be required.
						draw_graph.add_synchronization();
					}

					if (texture->draw_tracker != nullptr) {
						draw_trackers.push_back(texture->draw_tracker);

						if (set_uniform.writable) {
							draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE);
						} else {
							draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ);
						}
3753
}
3754
3755
DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
3756
3757
driver_uniform.ids.push_back(texture->driver_id);
3758
_check_transfer_worker_texture(texture);
3759
}
3760
} break;
3761
case UNIFORM_TYPE_TEXTURE_BUFFER: {
3762
if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
3763
if (set_uniform.length > 1) {
3764
ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") texture buffer elements, so it should be provided equal number of texture buffer IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
3765
} else {
3766
ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture buffer (IDs provided: " + itos(uniform.get_id_count()) + ").");
3767
}
3768
}
3769
3770
for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
3771
RID buffer_id = uniform.get_id(j);
3772
Buffer *buffer = texture_buffer_owner.get_or_null(buffer_id);
3773
ERR_FAIL_NULL_V_MSG(buffer, RID(), "Texture Buffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture buffer.");
3774
3775
if (set_uniform.writable && _buffer_make_mutable(buffer, buffer_id)) {
3776
// The buffer must be mutable if it's used for writing.
3777
draw_graph.add_synchronization();
3778
}
3779
3780
if (buffer->draw_tracker != nullptr) {
3781
draw_trackers.push_back(buffer->draw_tracker);
3782
3783
if (set_uniform.writable) {
3784
draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE);
3785
} else {
3786
draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ);
3787
}
3788
} else {
3789
untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ;
3790
}
3791
3792
driver_uniform.ids.push_back(buffer->driver_id);
3793
_check_transfer_worker_buffer(buffer);
3794
}
3795
} break;
3796
case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
3797
if (uniform.get_id_count() != (uint32_t)set_uniform.length * 2) {
3798
if (set_uniform.length > 1) {
3799
ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler buffer elements, so it should provided twice the amount of IDs (sampler,buffer pairs) to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
3800
} else {
3801
ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture buffer (IDs provided: " + itos(uniform.get_id_count()) + ").");
3802
}
3803
}
3804
3805
for (uint32_t j = 0; j < uniform.get_id_count(); j += 2) {
3806
RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j + 0));
3807
ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid sampler.");
3808
3809
RID buffer_id = uniform.get_id(j + 1);
3810
Buffer *buffer = texture_buffer_owner.get_or_null(buffer_id);
3811
ERR_FAIL_NULL_V_MSG(buffer, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture buffer.");
3812
3813
if (buffer->draw_tracker != nullptr) {
3814
draw_trackers.push_back(buffer->draw_tracker);
3815
draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ);
3816
} else {
3817
untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ;
3818
}
3819
3820
driver_uniform.ids.push_back(*sampler_driver_id);
3821
driver_uniform.ids.push_back(buffer->driver_id);
3822
_check_transfer_worker_buffer(buffer);
3823
}
3824
} break;
3825
case UNIFORM_TYPE_IMAGE_BUFFER: {
3826
// Todo.
3827
} break;
3828
case UNIFORM_TYPE_UNIFORM_BUFFER: {
3829
ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
3830
"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");
3831
3832
RID buffer_id = uniform.get_id(0);
3833
Buffer *buffer = uniform_buffer_owner.get_or_null(buffer_id);
3834
ERR_FAIL_NULL_V_MSG(buffer, RID(), "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");
3835
3836
ERR_FAIL_COND_V_MSG(buffer->size < (uint32_t)set_uniform.length, RID(),
3837
"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") is smaller than size of shader uniform: (" + itos(set_uniform.length) + ").");
3838
3839
if (buffer->draw_tracker != nullptr) {
3840
draw_trackers.push_back(buffer->draw_tracker);
3841
draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ);
3842
} else {
3843
untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ;
3844
}
3845
3846
driver_uniform.ids.push_back(buffer->driver_id);
3847
_check_transfer_worker_buffer(buffer);
3848
} break;
3849
case UNIFORM_TYPE_STORAGE_BUFFER: {
3850
ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
3851
"Storage buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");
3852
3853
Buffer *buffer = nullptr;
3854
3855
RID buffer_id = uniform.get_id(0);
3856
if (storage_buffer_owner.owns(buffer_id)) {
3857
buffer = storage_buffer_owner.get_or_null(buffer_id);
3858
} else if (vertex_buffer_owner.owns(buffer_id)) {
3859
buffer = vertex_buffer_owner.get_or_null(buffer_id);
3860
3861
ERR_FAIL_COND_V_MSG(!(buffer->usage.has_flag(RDD::BUFFER_USAGE_STORAGE_BIT)), RID(), "Vertex buffer supplied (binding: " + itos(uniform.binding) + ") was not created with storage flag.");
3862
}
3863
ERR_FAIL_NULL_V_MSG(buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");
3864
3865
// If 0, then it's sized on link time.
3866
ERR_FAIL_COND_V_MSG(set_uniform.length > 0 && buffer->size != (uint32_t)set_uniform.length, RID(),
3867
"Storage buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") does not match size of shader uniform: (" + itos(set_uniform.length) + ").");
3868
3869
if (set_uniform.writable && _buffer_make_mutable(buffer, buffer_id)) {
3870
// The buffer must be mutable if it's used for writing.
3871
draw_graph.add_synchronization();
3872
}
3873
3874
if (buffer->draw_tracker != nullptr) {
3875
draw_trackers.push_back(buffer->draw_tracker);
3876
3877
if (set_uniform.writable) {
3878
draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE);
3879
} else {
3880
draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ);
3881
}
3882
} else {
3883
untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ;
3884
}
3885
3886
driver_uniform.ids.push_back(buffer->driver_id);
3887
_check_transfer_worker_buffer(buffer);
3888
} break;
3889
case UNIFORM_TYPE_INPUT_ATTACHMENT: {
3890
ERR_FAIL_COND_V_MSG(shader->is_compute, RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") supplied for compute shader (this is not allowed).");
3891
3892
if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
3893
if (set_uniform.length > 1) {
3894
ERR_FAIL_V_MSG(RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so it should be provided equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
3895
} else {
3896
ERR_FAIL_V_MSG(RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
3897
}
3898
}
3899
3900
for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
3901
RID texture_id = uniform.get_id(j);
3902
Texture *texture = texture_owner.get_or_null(texture_id);
3903
3904
ERR_FAIL_NULL_V_MSG(texture, RID(),
3905
"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
3906
3907
ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
3908
"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");
3909
3910
DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
3911
3912
driver_uniform.ids.push_back(texture->driver_id);
3913
_check_transfer_worker_texture(texture);
3914
}
3915
} break;
3916
default: {
3917
}
3918
}
3919
}
3920
3921
RDD::UniformSetID driver_uniform_set = driver->uniform_set_create(driver_uniforms, shader->driver_id, p_shader_set, p_linear_pool ? frame : -1);
3922
ERR_FAIL_COND_V(!driver_uniform_set, RID());
3923
3924
UniformSet uniform_set;
3925
uniform_set.driver_id = driver_uniform_set;
3926
uniform_set.format = shader->set_formats[p_shader_set];
3927
uniform_set.attachable_textures = attachable_textures;
3928
uniform_set.draw_trackers = draw_trackers;
3929
uniform_set.draw_trackers_usage = draw_trackers_usage;
3930
uniform_set.untracked_usage = untracked_usage;
3931
uniform_set.shared_textures_to_update = shared_textures_to_update;
3932
uniform_set.shader_set = p_shader_set;
3933
uniform_set.shader_id = p_shader;
3934
3935
RID id = uniform_set_owner.make_rid(uniform_set);
3936
#ifdef DEV_ENABLED
3937
set_resource_name(id, "RID:" + itos(id.get_id()));
3938
#endif
3939
// Add dependencies.
3940
_add_dependency(id, p_shader);
3941
for (uint32_t i = 0; i < uniform_count; i++) {
3942
const Uniform &uniform = uniforms[i];
3943
int id_count = uniform.get_id_count();
3944
for (int j = 0; j < id_count; j++) {
3945
_add_dependency(id, uniform.get_id(j));
3946
}
3947
}
3948
3949
return id;
3950
}
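
// Illustrative usage sketch (not part of the engine source): how a caller can
// build a uniform set for set 0 through the public API. The RIDs and the `rd`
// pointer are hypothetical placeholders.
//
//     Vector<RD::Uniform> uniforms;
//     RD::Uniform u;
//     u.uniform_type = RD::UNIFORM_TYPE_STORAGE_BUFFER;
//     u.binding = 0; // Must match the binding declared for set 0 in the shader.
//     u.append_id(storage_buffer_rid);
//     uniforms.push_back(u);
//     RID uniform_set = rd->uniform_set_create(uniforms, shader_rid, 0);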

bool RenderingDevice::uniform_set_is_valid(RID p_uniform_set) {
_THREAD_SAFE_METHOD_

return uniform_set_owner.owns(p_uniform_set);
}

void RenderingDevice::uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata) {
_THREAD_SAFE_METHOD_

UniformSet *us = uniform_set_owner.get_or_null(p_uniform_set);
ERR_FAIL_NULL(us);
us->invalidated_callback = p_callback;
us->invalidated_callback_userdata = p_userdata;
}

bool RenderingDevice::uniform_sets_have_linear_pools() const {
return driver->uniform_sets_have_linear_pools();
}
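
// Note (assumption, for orientation only): linear pools allocate descriptor
// sets out of per-frame pools that are recycled wholesale rather than freeing
// sets one by one, which is why uniform_set_create() above passes the current
// frame index to the driver when p_linear_pool is enabled.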

/*******************/
/**** PIPELINES ****/
/*******************/

RID RenderingDevice::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
// Needs a shader.
Shader *shader = shader_owner.get_or_null(p_shader);
ERR_FAIL_NULL_V(shader, RID());
ERR_FAIL_COND_V_MSG(shader->is_compute, RID(), "Compute shaders can't be used in render pipelines.");

// Validate the pre-raster shader. One of the stages must be a vertex shader or a mesh shader (not implemented yet).
ERR_FAIL_COND_V_MSG(!shader->stage_bits.has_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT), RID(), "Pre-raster shader (vertex shader) is not provided for pipeline creation.");

FramebufferFormat fb_format;
{
_THREAD_SAFE_METHOD_

if (p_framebuffer_format == INVALID_ID) {
// If nothing provided, use an empty one (no attachments).
p_framebuffer_format = framebuffer_format_create(Vector<AttachmentFormat>());
}
ERR_FAIL_COND_V(!framebuffer_formats.has(p_framebuffer_format), RID());
fb_format = framebuffer_formats[p_framebuffer_format];
}

// Validate shader vs. framebuffer.
{
ERR_FAIL_COND_V_MSG(p_for_render_pass >= uint32_t(fb_format.E->key().passes.size()), RID(), "Render pass requested for pipeline creation (" + itos(p_for_render_pass) + ") is out of bounds.");
const FramebufferPass &pass = fb_format.E->key().passes[p_for_render_pass];
uint32_t output_mask = 0;
for (int i = 0; i < pass.color_attachments.size(); i++) {
if (pass.color_attachments[i] != ATTACHMENT_UNUSED) {
output_mask |= 1 << i;
}
}
ERR_FAIL_COND_V_MSG(shader->fragment_output_mask != output_mask, RID(),
"Mismatched fragment shader output mask (" + itos(shader->fragment_output_mask) + ") and framebuffer color output mask (" + itos(output_mask) + ") when binding both in render pipeline.");
}

RDD::VertexFormatID driver_vertex_format;
if (p_vertex_format != INVALID_ID) {
// Uses vertices, else it does not.
ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
driver_vertex_format = vertex_formats[p_vertex_format].driver_id;

// Validate with inputs.
for (uint32_t i = 0; i < 64; i++) {
if (!(shader->vertex_input_mask & ((uint64_t)1) << i)) {
continue;
}
bool found = false;
for (int j = 0; j < vd.vertex_formats.size(); j++) {
if (vd.vertex_formats[j].location == i) {
found = true;
break;
}
}

ERR_FAIL_COND_V_MSG(!found, RID(),
"Shader vertex input location (" + itos(i) + ") not provided in vertex input description for pipeline creation.");
}

} else {
ERR_FAIL_COND_V_MSG(shader->vertex_input_mask != 0, RID(),
"Shader contains vertex inputs, but no vertex input description was provided for pipeline creation.");
}

ERR_FAIL_INDEX_V(p_render_primitive, RENDER_PRIMITIVE_MAX, RID());

ERR_FAIL_INDEX_V(p_rasterization_state.cull_mode, 3, RID());

if (p_multisample_state.sample_mask.size()) {
// Use sample mask.
ERR_FAIL_COND_V((int)TEXTURE_SAMPLES_COUNT[p_multisample_state.sample_count] != p_multisample_state.sample_mask.size(), RID());
}

ERR_FAIL_INDEX_V(p_depth_stencil_state.depth_compare_operator, COMPARE_OP_MAX, RID());

ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.fail, STENCIL_OP_MAX, RID());
ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.pass, STENCIL_OP_MAX, RID());
ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.depth_fail, STENCIL_OP_MAX, RID());
ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.compare, COMPARE_OP_MAX, RID());

ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.fail, STENCIL_OP_MAX, RID());
ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.pass, STENCIL_OP_MAX, RID());
ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.depth_fail, STENCIL_OP_MAX, RID());
ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.compare, COMPARE_OP_MAX, RID());

ERR_FAIL_INDEX_V(p_blend_state.logic_op, LOGIC_OP_MAX, RID());

const FramebufferPass &pass = fb_format.E->key().passes[p_for_render_pass];
ERR_FAIL_COND_V(p_blend_state.attachments.size() < pass.color_attachments.size(), RID());
for (int i = 0; i < pass.color_attachments.size(); i++) {
if (pass.color_attachments[i] != ATTACHMENT_UNUSED) {
ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_color_blend_factor, BLEND_FACTOR_MAX, RID());
ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_color_blend_factor, BLEND_FACTOR_MAX, RID());
ERR_FAIL_INDEX_V(p_blend_state.attachments[i].color_blend_op, BLEND_OP_MAX, RID());

ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
ERR_FAIL_INDEX_V(p_blend_state.attachments[i].alpha_blend_op, BLEND_OP_MAX, RID());
}
}

for (int i = 0; i < shader->specialization_constants.size(); i++) {
const ShaderSpecializationConstant &sc = shader->specialization_constants[i];
for (int j = 0; j < p_specialization_constants.size(); j++) {
const PipelineSpecializationConstant &psc = p_specialization_constants[j];
if (psc.constant_id == sc.constant_id) {
ERR_FAIL_COND_V_MSG(psc.type != sc.type, RID(), "Specialization constant provided for id (" + itos(sc.constant_id) + ") is of the wrong type.");
break;
}
}
}

RenderPipeline pipeline;
pipeline.driver_id = driver->render_pipeline_create(
shader->driver_id,
driver_vertex_format,
p_render_primitive,
p_rasterization_state,
p_multisample_state,
p_depth_stencil_state,
p_blend_state,
pass.color_attachments,
p_dynamic_state_flags,
fb_format.render_pass,
p_for_render_pass,
p_specialization_constants);
ERR_FAIL_COND_V(!pipeline.driver_id, RID());

if (pipeline_cache_enabled) {
_update_pipeline_cache();
}

pipeline.shader = p_shader;
pipeline.shader_driver_id = shader->driver_id;
pipeline.shader_layout_hash = shader->layout_hash;
pipeline.set_formats = shader->set_formats;
pipeline.push_constant_size = shader->push_constant_size;
pipeline.stage_bits = shader->stage_bits;

#ifdef DEBUG_ENABLED
pipeline.validation.dynamic_state = p_dynamic_state_flags;
pipeline.validation.framebuffer_format = p_framebuffer_format;
pipeline.validation.render_pass = p_for_render_pass;
pipeline.validation.vertex_format = p_vertex_format;
pipeline.validation.uses_restart_indices = p_render_primitive == RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX;

static const uint32_t primitive_divisor[RENDER_PRIMITIVE_MAX] = {
1, 2, 1, 1, 1, 3, 1, 1, 1, 1, 1
};
pipeline.validation.primitive_divisor = primitive_divisor[p_render_primitive];
static const uint32_t primitive_minimum[RENDER_PRIMITIVE_MAX] = {
1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 1
};
pipeline.validation.primitive_minimum = primitive_minimum[p_render_primitive];
#endif

// Create ID to associate with this pipeline.
RID id = render_pipeline_owner.make_rid(pipeline);
{
_THREAD_SAFE_METHOD_

#ifdef DEV_ENABLED
set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
// Now add all the dependencies.
_add_dependency(id, p_shader);
}

return id;
}
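
// Illustrative usage sketch (hypothetical RIDs and format IDs; default state
// structs used for brevity):
//
//     RD::PipelineRasterizationState raster;
//     RD::PipelineMultisampleState multisample;
//     RD::PipelineDepthStencilState depth_stencil;
//     RD::PipelineColorBlendState blend = RD::PipelineColorBlendState::create_disabled();
//     RID pipeline = rd->render_pipeline_create(shader_rid, fb_format_id, vertex_format_id,
//             RD::RENDER_PRIMITIVE_TRIANGLES, raster, multisample, depth_stencil, blend);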

bool RenderingDevice::render_pipeline_is_valid(RID p_pipeline) {
_THREAD_SAFE_METHOD_

return render_pipeline_owner.owns(p_pipeline);
}

RID RenderingDevice::compute_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
Shader *shader;

{
_THREAD_SAFE_METHOD_

// Needs a shader.
shader = shader_owner.get_or_null(p_shader);
ERR_FAIL_NULL_V(shader, RID());

ERR_FAIL_COND_V_MSG(!shader->is_compute, RID(),
"Non-compute shaders can't be used in compute pipelines.");
}

for (int i = 0; i < shader->specialization_constants.size(); i++) {
const ShaderSpecializationConstant &sc = shader->specialization_constants[i];
for (int j = 0; j < p_specialization_constants.size(); j++) {
const PipelineSpecializationConstant &psc = p_specialization_constants[j];
if (psc.constant_id == sc.constant_id) {
ERR_FAIL_COND_V_MSG(psc.type != sc.type, RID(), "Specialization constant provided for id (" + itos(sc.constant_id) + ") is of the wrong type.");
break;
}
}
}

ComputePipeline pipeline;
pipeline.driver_id = driver->compute_pipeline_create(shader->driver_id, p_specialization_constants);
ERR_FAIL_COND_V(!pipeline.driver_id, RID());

if (pipeline_cache_enabled) {
_update_pipeline_cache();
}

pipeline.shader = p_shader;
pipeline.shader_driver_id = shader->driver_id;
pipeline.shader_layout_hash = shader->layout_hash;
pipeline.set_formats = shader->set_formats;
pipeline.push_constant_size = shader->push_constant_size;
pipeline.local_group_size[0] = shader->compute_local_size[0];
pipeline.local_group_size[1] = shader->compute_local_size[1];
pipeline.local_group_size[2] = shader->compute_local_size[2];

// Create ID to associate with this pipeline.
RID id = compute_pipeline_owner.make_rid(pipeline);
{
_THREAD_SAFE_METHOD_

#ifdef DEV_ENABLED
set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
// Now add all the dependencies.
_add_dependency(id, p_shader);
}

return id;
}
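
// Illustrative usage sketch (hypothetical RIDs): compute pipelines only need
// the shader plus optional specialization constants, and are later bound
// through a compute list.
//
//     RID pipeline = rd->compute_pipeline_create(compute_shader_rid);
//     RD::ComputeListID cl = rd->compute_list_begin();
//     rd->compute_list_bind_compute_pipeline(cl, pipeline);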

bool RenderingDevice::compute_pipeline_is_valid(RID p_pipeline) {
_THREAD_SAFE_METHOD_

return compute_pipeline_owner.owns(p_pipeline);
}

/****************/
/**** SCREEN ****/
/****************/

uint32_t RenderingDevice::_get_swap_chain_desired_count() const {
return MAX(2U, uint32_t(GLOBAL_GET_CACHED(uint32_t, "rendering/rendering_device/vsync/swapchain_image_count")));
}

Error RenderingDevice::screen_create(DisplayServer::WindowID p_screen) {
_THREAD_SAFE_METHOD_

RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
ERR_FAIL_COND_V_MSG(surface == 0, ERR_CANT_CREATE, "A surface was not created for the screen.");

HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
ERR_FAIL_COND_V_MSG(it != screen_swap_chains.end(), ERR_CANT_CREATE, "A swap chain was already created for the screen.");

RDD::SwapChainID swap_chain = driver->swap_chain_create(surface);
ERR_FAIL_COND_V_MSG(swap_chain.id == 0, ERR_CANT_CREATE, "Unable to create swap chain.");

screen_swap_chains[p_screen] = swap_chain;

return OK;
}

Error RenderingDevice::screen_prepare_for_drawing(DisplayServer::WindowID p_screen) {
_THREAD_SAFE_METHOD_

// After submitting work, acquire the swapchain image(s).
HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), ERR_CANT_CREATE, "A swap chain was not created for the screen.");

// Erase the framebuffer corresponding to this screen from the map in case any of the operations fail.
screen_framebuffers.erase(p_screen);

// If this frame has already queued this swap chain for presentation, we present it and remove it from the pending list.
uint32_t to_present_index = 0;
while (to_present_index < frames[frame].swap_chains_to_present.size()) {
if (frames[frame].swap_chains_to_present[to_present_index] == it->value) {
driver->command_queue_execute_and_present(present_queue, {}, {}, {}, {}, it->value);
frames[frame].swap_chains_to_present.remove_at(to_present_index);
} else {
to_present_index++;
}
}

bool resize_required = false;
RDD::FramebufferID framebuffer = driver->swap_chain_acquire_framebuffer(main_queue, it->value, resize_required);
if (resize_required) {
// Flush everything so nothing can be using the swap chain before resizing it.
_flush_and_stall_for_all_frames();

Error err = driver->swap_chain_resize(main_queue, it->value, _get_swap_chain_desired_count());
if (err != OK) {
// Resize is allowed to fail silently because the window can be minimized.
return err;
}

framebuffer = driver->swap_chain_acquire_framebuffer(main_queue, it->value, resize_required);
}

if (framebuffer.id == 0) {
// Some drivers like NVIDIA are fast enough to invalidate the swap chain between resizing and acquisition (GH-94104).
// This typically occurs during continuous window resizing operations, especially if done quickly.
// Allow this to fail silently since it has no visual consequences.
return ERR_CANT_CREATE;
}

// Store the framebuffer that will be used next to draw to this screen.
screen_framebuffers[p_screen] = framebuffer;
frames[frame].swap_chains_to_present.push_back(it->value);

return OK;
}
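
// Sketch of the per-frame flow these functions imply (orientation only, not
// normative): screen_create() once per window, then each frame
// screen_prepare_for_drawing() to acquire the swap chain framebuffer,
// draw_list_begin_for_screen() plus draw commands, and the swap chain queued
// in swap_chains_to_present is presented when the frame is submitted.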

int RenderingDevice::screen_get_width(DisplayServer::WindowID p_screen) const {
_THREAD_SAFE_METHOD_

RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
ERR_FAIL_COND_V_MSG(surface == 0, 0, "A surface was not created for the screen.");
return context->surface_get_width(surface);
}

int RenderingDevice::screen_get_height(DisplayServer::WindowID p_screen) const {
_THREAD_SAFE_METHOD_

RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
ERR_FAIL_COND_V_MSG(surface == 0, 0, "A surface was not created for the screen.");
return context->surface_get_height(surface);
}

int RenderingDevice::screen_get_pre_rotation_degrees(DisplayServer::WindowID p_screen) const {
_THREAD_SAFE_METHOD_

HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), 0, "A swap chain was not created for the screen.");

return driver->swap_chain_get_pre_rotation_degrees(it->value);
}

RenderingDevice::FramebufferFormatID RenderingDevice::screen_get_framebuffer_format(DisplayServer::WindowID p_screen) const {
_THREAD_SAFE_METHOD_

HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), INVALID_ID, "Screen was never prepared.");

DataFormat format = driver->swap_chain_get_format(it->value);
ERR_FAIL_COND_V(format == DATA_FORMAT_MAX, INVALID_ID);

AttachmentFormat attachment;
attachment.format = format;
attachment.samples = TEXTURE_SAMPLES_1;
attachment.usage_flags = TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
Vector<AttachmentFormat> screen_attachment;
screen_attachment.push_back(attachment);
return const_cast<RenderingDevice *>(this)->framebuffer_format_create(screen_attachment);
}

Error RenderingDevice::screen_free(DisplayServer::WindowID p_screen) {
_THREAD_SAFE_METHOD_

HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), FAILED, "Screen was never created.");

// Flush everything so nothing can be using the swap chain before erasing it.
_flush_and_stall_for_all_frames();

const DisplayServer::WindowID screen = it->key;
const RDD::SwapChainID swap_chain = it->value;
driver->swap_chain_free(swap_chain);
screen_framebuffers.erase(screen);
screen_swap_chains.erase(screen);

return OK;
}

/*******************/
/**** DRAW LIST ****/
/*******************/

RenderingDevice::DrawListID RenderingDevice::draw_list_begin_for_screen(DisplayServer::WindowID p_screen, const Color &p_clear_color) {
ERR_RENDER_THREAD_GUARD_V(INVALID_ID);

ERR_FAIL_COND_V_MSG(draw_list.active, INVALID_ID, "Only one draw list can be active at the same time.");
ERR_FAIL_COND_V_MSG(compute_list.active, INVALID_ID, "Only one draw/compute list can be active at the same time.");

RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator sc_it = screen_swap_chains.find(p_screen);
HashMap<DisplayServer::WindowID, RDD::FramebufferID>::ConstIterator fb_it = screen_framebuffers.find(p_screen);
ERR_FAIL_COND_V_MSG(surface == 0, INVALID_ID, "A surface was not created for the screen.");
ERR_FAIL_COND_V_MSG(sc_it == screen_swap_chains.end(), INVALID_ID, "Screen was never prepared.");
ERR_FAIL_COND_V_MSG(fb_it == screen_framebuffers.end(), INVALID_ID, "Framebuffer was never prepared.");

Rect2i viewport = Rect2i(0, 0, context->surface_get_width(surface), context->surface_get_height(surface));

_draw_list_start(viewport);
#ifdef DEBUG_ENABLED
draw_list_framebuffer_format = screen_get_framebuffer_format(p_screen);
#endif
draw_list_subpass_count = 1;

RDD::RenderPassClearValue clear_value;
clear_value.color = p_clear_color;

RDD::RenderPassID render_pass = driver->swap_chain_get_render_pass(sc_it->value);
draw_graph.add_draw_list_begin(render_pass, fb_it->value, viewport, RDG::ATTACHMENT_OPERATION_CLEAR, clear_value, RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, RDD::BreadcrumbMarker::BLIT_PASS, split_swapchain_into_its_own_cmd_buffer);

draw_graph.add_draw_list_set_viewport(viewport);
draw_graph.add_draw_list_set_scissor(viewport);

return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
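
// Illustrative usage sketch (hypothetical window and pipeline RIDs):
//
//     RD::DrawListID dl = rd->draw_list_begin_for_screen(DisplayServer::MAIN_WINDOW_ID, Color(0, 0, 0));
//     rd->draw_list_bind_render_pipeline(dl, pipeline_rid);
//     // ... bind sets/arrays and draw ...
//     rd->draw_list_end();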

RenderingDevice::DrawListID RenderingDevice::_draw_list_begin_bind(RID p_framebuffer, BitField<DrawFlags> p_draw_flags, const Vector<Color> &p_clear_color_values, float p_clear_depth_value, uint32_t p_clear_stencil_value, const Rect2 &p_region, uint32_t p_breadcrumb) {
return draw_list_begin(p_framebuffer, p_draw_flags, p_clear_color_values, p_clear_depth_value, p_clear_stencil_value, p_region, p_breadcrumb);
}

RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer, BitField<DrawFlags> p_draw_flags, VectorView<Color> p_clear_color_values, float p_clear_depth_value, uint32_t p_clear_stencil_value, const Rect2 &p_region, uint32_t p_breadcrumb) {
ERR_RENDER_THREAD_GUARD_V(INVALID_ID);

ERR_FAIL_COND_V_MSG(draw_list.active, INVALID_ID, "Only one draw list can be active at the same time.");

Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
ERR_FAIL_NULL_V(framebuffer, INVALID_ID);

const FramebufferFormatKey &framebuffer_key = framebuffer_formats[framebuffer->format_id].E->key();
Point2i viewport_offset;
Point2i viewport_size = framebuffer->size;

if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region.
Rect2i viewport(viewport_offset, viewport_size);
Rect2i regioni = p_region;
if (!((regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y)))) {
ERR_FAIL_V_MSG(INVALID_ID, "When supplying a custom region, it must be contained within the framebuffer rectangle.");
}

viewport_offset = regioni.position;
viewport_size = regioni.size;
}

thread_local LocalVector<RDG::AttachmentOperation> operations;
thread_local LocalVector<RDD::RenderPassClearValue> clear_values;
thread_local LocalVector<RDG::ResourceTracker *> resource_trackers;
thread_local LocalVector<RDG::ResourceUsage> resource_usages;
BitField<RDD::PipelineStageBits> stages = {};
operations.resize(framebuffer->texture_ids.size());
clear_values.resize(framebuffer->texture_ids.size());
resource_trackers.clear();
resource_usages.clear();
stages.clear();

uint32_t color_index = 0;
for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
RID texture_rid = framebuffer->texture_ids[i];
Texture *texture = texture_owner.get_or_null(texture_rid);
if (texture == nullptr) {
operations[i] = RDG::ATTACHMENT_OPERATION_DEFAULT;
clear_values[i] = RDD::RenderPassClearValue();
continue;
}

// Indicate the texture will get modified for the shared texture fallback.
_texture_update_shared_fallback(texture_rid, texture, true);

RDG::AttachmentOperation operation = RDG::ATTACHMENT_OPERATION_DEFAULT;
RDD::RenderPassClearValue clear_value;
if (framebuffer_key.vrs_attachment == i && (texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT)) {
resource_trackers.push_back(texture->draw_tracker);
resource_usages.push_back(_vrs_usage_from_method(framebuffer_key.vrs_method));
stages.set_flag(_vrs_stages_from_method(framebuffer_key.vrs_method));
} else if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
if (p_draw_flags.has_flag(DrawFlags(DRAW_CLEAR_COLOR_0 << color_index))) {
ERR_FAIL_COND_V_MSG(color_index >= p_clear_color_values.size(), INVALID_ID, vformat("Color texture (%d) was specified to be cleared but no color value was provided.", color_index));
operation = RDG::ATTACHMENT_OPERATION_CLEAR;
clear_value.color = p_clear_color_values[color_index];
} else if (p_draw_flags.has_flag(DrawFlags(DRAW_IGNORE_COLOR_0 << color_index))) {
operation = RDG::ATTACHMENT_OPERATION_IGNORE;
}

resource_trackers.push_back(texture->draw_tracker);
resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE);
stages.set_flag(RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
color_index++;
} else if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
if (p_draw_flags.has_flag(DRAW_CLEAR_DEPTH) || p_draw_flags.has_flag(DRAW_CLEAR_STENCIL)) {
operation = RDG::ATTACHMENT_OPERATION_CLEAR;
clear_value.depth = p_clear_depth_value;
clear_value.stencil = p_clear_stencil_value;
} else if (p_draw_flags.has_flag(DRAW_IGNORE_DEPTH) || p_draw_flags.has_flag(DRAW_IGNORE_STENCIL)) {
operation = RDG::ATTACHMENT_OPERATION_IGNORE;
}

resource_trackers.push_back(texture->draw_tracker);
resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE);
stages.set_flag(RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT);
stages.set_flag(RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
}

operations[i] = operation;
clear_values[i] = clear_value;
}

draw_graph.add_draw_list_begin(framebuffer->framebuffer_cache, Rect2i(viewport_offset, viewport_size), operations, clear_values, stages, p_breadcrumb);
draw_graph.add_draw_list_usages(resource_trackers, resource_usages);

// Mark textures as bound.
draw_list_bound_textures.clear();

for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
Texture *texture = texture_owner.get_or_null(framebuffer->texture_ids[i]);
if (texture == nullptr) {
continue;
}

texture->bound = true;
draw_list_bound_textures.push_back(framebuffer->texture_ids[i]);
}

_draw_list_start(Rect2i(viewport_offset, viewport_size));
#ifdef DEBUG_ENABLED
draw_list_framebuffer_format = framebuffer->format_id;
#endif
draw_list_current_subpass = 0;
draw_list_subpass_count = framebuffer_key.passes.size();

Rect2i viewport_rect(viewport_offset, viewport_size);
draw_graph.add_draw_list_set_viewport(viewport_rect);
draw_graph.add_draw_list_set_scissor(viewport_rect);

return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
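
// Illustrative usage sketch: beginning a list on an offscreen framebuffer,
// clearing the first color attachment and the depth buffer (hypothetical RID):
//
//     Vector<Color> clear_colors;
//     clear_colors.push_back(Color(0, 0, 0, 1));
//     RD::DrawListID dl = rd->draw_list_begin(framebuffer_rid,
//             RD::DRAW_CLEAR_COLOR_0 | RD::DRAW_CLEAR_DEPTH, clear_colors);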

#ifndef DISABLE_DEPRECATED
Error RenderingDevice::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
ERR_FAIL_V_MSG(ERR_UNAVAILABLE, "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
#endif

void RenderingDevice::draw_list_set_blend_constants(DrawListID p_list, const Color &p_color) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

draw_graph.add_draw_list_set_blend_constants(p_color);
}

void RenderingDevice::draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

const RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_render_pipeline);
ERR_FAIL_NULL(pipeline);
#ifdef DEBUG_ENABLED
ERR_FAIL_COND(pipeline->validation.framebuffer_format != draw_list_framebuffer_format && pipeline->validation.render_pass != draw_list_current_subpass);
#endif

if (p_render_pipeline == draw_list.state.pipeline) {
return; // Redundant state, return.
}

draw_list.state.pipeline = p_render_pipeline;

draw_graph.add_draw_list_bind_pipeline(pipeline->driver_id, pipeline->stage_bits);

if (draw_list.state.pipeline_shader != pipeline->shader) {
// Shader changed, so descriptor sets may become incompatible.

uint32_t pcount = pipeline->set_formats.size(); // Formats count in this pipeline.
draw_list.state.set_count = MAX(draw_list.state.set_count, pcount);
const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats.

uint32_t first_invalid_set = UINT32_MAX; // All valid by default.
if (pipeline->push_constant_size != draw_list.state.pipeline_push_constant_size) {
// All sets must be invalidated as the pipeline layout is not compatible if the push constant range is different.
draw_list.state.pipeline_push_constant_size = pipeline->push_constant_size;
first_invalid_set = 0;
} else {
switch (driver->api_trait_get(RDD::API_TRAIT_SHADER_CHANGE_INVALIDATION)) {
case RDD::SHADER_CHANGE_INVALIDATION_ALL_BOUND_UNIFORM_SETS: {
first_invalid_set = 0;
} break;
case RDD::SHADER_CHANGE_INVALIDATION_INCOMPATIBLE_SETS_PLUS_CASCADE: {
for (uint32_t i = 0; i < pcount; i++) {
if (draw_list.state.sets[i].pipeline_expected_format != pformats[i]) {
first_invalid_set = i;
break;
}
}
} break;
case RDD::SHADER_CHANGE_INVALIDATION_ALL_OR_NONE_ACCORDING_TO_LAYOUT_HASH: {
if (draw_list.state.pipeline_shader_layout_hash != pipeline->shader_layout_hash) {
first_invalid_set = 0;
}
} break;
}
}

if (pipeline->push_constant_size) {
#ifdef DEBUG_ENABLED
draw_list.validation.pipeline_push_constant_supplied = false;
#endif
}

for (uint32_t i = 0; i < pcount; i++) {
draw_list.state.sets[i].bound = draw_list.state.sets[i].bound && i < first_invalid_set;
draw_list.state.sets[i].pipeline_expected_format = pformats[i];
}

for (uint32_t i = pcount; i < draw_list.state.set_count; i++) {
// Unbind the ones above (not used) if they exist.
draw_list.state.sets[i].bound = false;
}

draw_list.state.set_count = pcount; // Update set count.

draw_list.state.pipeline_shader = pipeline->shader;
draw_list.state.pipeline_shader_driver_id = pipeline->shader_driver_id;
draw_list.state.pipeline_shader_layout_hash = pipeline->shader_layout_hash;
}

#ifdef DEBUG_ENABLED
// Update render pass pipeline info.
draw_list.validation.pipeline_active = true;
draw_list.validation.pipeline_dynamic_state = pipeline->validation.dynamic_state;
draw_list.validation.pipeline_vertex_format = pipeline->validation.vertex_format;
draw_list.validation.pipeline_uses_restart_indices = pipeline->validation.uses_restart_indices;
draw_list.validation.pipeline_primitive_divisor = pipeline->validation.primitive_divisor;
draw_list.validation.pipeline_primitive_minimum = pipeline->validation.primitive_minimum;
draw_list.validation.pipeline_push_constant_size = pipeline->push_constant_size;
#endif
}
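
// Note on the invalidation logic above: how many previously bound sets survive
// a pipeline change is driver-specific, so the switch on
// API_TRAIT_SHADER_CHANGE_INVALIDATION conservatively marks sets as unbound
// and they are re-bound on demand by the next draw call.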

void RenderingDevice::draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index) {
ERR_RENDER_THREAD_GUARD();

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_index >= driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS) || p_index >= MAX_UNIFORM_SETS,
"Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS)) + ").");
#endif

ERR_FAIL_COND(!draw_list.active);

const UniformSet *uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
ERR_FAIL_NULL(uniform_set);

if (p_index > draw_list.state.set_count) {
draw_list.state.set_count = p_index;
}

draw_list.state.sets[p_index].uniform_set_driver_id = uniform_set->driver_id; // Update set pointer.
draw_list.state.sets[p_index].bound = false; // Needs rebind.
draw_list.state.sets[p_index].uniform_set_format = uniform_set->format;
draw_list.state.sets[p_index].uniform_set = p_uniform_set;

#ifdef DEBUG_ENABLED
{ // Validate that textures bound are not attached as framebuffer bindings.
uint32_t attachable_count = uniform_set->attachable_textures.size();
const UniformSet::AttachableTexture *attachable_ptr = uniform_set->attachable_textures.ptr();
uint32_t bound_count = draw_list_bound_textures.size();
const RID *bound_ptr = draw_list_bound_textures.ptr();
for (uint32_t i = 0; i < attachable_count; i++) {
for (uint32_t j = 0; j < bound_count; j++) {
ERR_FAIL_COND_MSG(attachable_ptr[i].texture == bound_ptr[j],
"Attempted to use the same texture in framebuffer attachment and a uniform (set: " + itos(p_index) + ", binding: " + itos(attachable_ptr[i].bind) + "), this is not allowed.");
}
}
}
#endif
}

void RenderingDevice::draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

VertexArray *vertex_array = vertex_array_owner.get_or_null(p_vertex_array);
ERR_FAIL_NULL(vertex_array);

if (draw_list.state.vertex_array == p_vertex_array) {
return; // Already set.
}

_check_transfer_worker_vertex_array(vertex_array);

draw_list.state.vertex_array = p_vertex_array;

#ifdef DEBUG_ENABLED
draw_list.validation.vertex_format = vertex_array->description;
draw_list.validation.vertex_max_instances_allowed = vertex_array->max_instances_allowed;
#endif
draw_list.validation.vertex_array_size = vertex_array->vertex_count;

draw_graph.add_draw_list_bind_vertex_buffers(vertex_array->buffers, vertex_array->offsets);

for (int i = 0; i < vertex_array->draw_trackers.size(); i++) {
draw_graph.add_draw_list_usage(vertex_array->draw_trackers[i], RDG::RESOURCE_USAGE_VERTEX_BUFFER_READ);
}
}

void RenderingDevice::draw_list_bind_index_array(DrawListID p_list, RID p_index_array) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

IndexArray *index_array = index_array_owner.get_or_null(p_index_array);
ERR_FAIL_NULL(index_array);

if (draw_list.state.index_array == p_index_array) {
return; // Already set.
}

_check_transfer_worker_index_array(index_array);

draw_list.state.index_array = p_index_array;
#ifdef DEBUG_ENABLED
draw_list.validation.index_array_max_index = index_array->max_index;
#endif
draw_list.validation.index_array_count = index_array->indices;

const uint64_t offset_bytes = index_array->offset * (index_array->format == INDEX_BUFFER_FORMAT_UINT16 ? sizeof(uint16_t) : sizeof(uint32_t));
draw_graph.add_draw_list_bind_index_buffer(index_array->driver_id, index_array->format, offset_bytes);

if (index_array->draw_tracker != nullptr) {
draw_graph.add_draw_list_usage(index_array->draw_tracker, RDG::RESOURCE_USAGE_INDEX_BUFFER_READ);
}
}

void RenderingDevice::draw_list_set_line_width(DrawListID p_list, float p_width) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

draw_graph.add_draw_list_set_line_width(p_width);
}

void RenderingDevice::draw_list_set_push_constant(DrawListID p_list, const void *p_data, uint32_t p_data_size) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_data_size != draw_list.validation.pipeline_push_constant_size,
"This render pipeline requires (" + itos(draw_list.validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ").");
#endif

draw_graph.add_draw_list_set_push_constant(draw_list.state.pipeline_shader_driver_id, p_data, p_data_size);

#ifdef DEBUG_ENABLED
draw_list.validation.pipeline_push_constant_supplied = true;
#endif
}
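
// Illustrative usage sketch: the supplied block must be exactly the push
// constant size declared by the shader (hypothetical struct layout):
//
//     struct PushConstant {
//         float mvp[16];
//         float color[4];
//     } pc = {};
//     rd->draw_list_set_push_constant(dl, &pc, sizeof(PushConstant));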

void RenderingDevice::draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances, uint32_t p_procedural_vertices) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(!draw_list.validation.pipeline_active,
"No render pipeline was set before attempting to draw.");
if (draw_list.validation.pipeline_vertex_format != INVALID_ID) {
// Pipeline uses vertices, validate format.
ERR_FAIL_COND_MSG(draw_list.validation.vertex_format == INVALID_ID,
"No vertex array was bound, and render pipeline expects vertices.");
// Make sure format is right.
ERR_FAIL_COND_MSG(draw_list.validation.pipeline_vertex_format != draw_list.validation.vertex_format,
"The vertex format used to create the pipeline does not match the vertex format bound.");
// Make sure number of instances is valid.
ERR_FAIL_COND_MSG(p_instances > draw_list.validation.vertex_max_instances_allowed,
"Number of instances requested (" + itos(p_instances) + ") is larger than the maximum number supported by the bound vertex array (" + itos(draw_list.validation.vertex_max_instances_allowed) + ").");
}

if (draw_list.validation.pipeline_push_constant_size > 0) {
// Using push constants, check that they were supplied.
ERR_FAIL_COND_MSG(!draw_list.validation.pipeline_push_constant_supplied,
"The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
}

#endif

#ifdef DEBUG_ENABLED
for (uint32_t i = 0; i < draw_list.state.set_count; i++) {
if (draw_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

if (draw_list.state.sets[i].pipeline_expected_format != draw_list.state.sets[i].uniform_set_format) {
if (draw_list.state.sets[i].uniform_set_format == 0) {
ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of drawing, which are required by the pipeline.");
} else if (uniform_set_owner.owns(draw_list.state.sets[i].uniform_set)) {
UniformSet *us = uniform_set_owner.get_or_null(draw_list.state.sets[i].uniform_set);
ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(draw_list.state.pipeline_shader));
} else {
ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(draw_list.state.pipeline_shader));
}
}
}
#endif
thread_local LocalVector<RDD::UniformSetID> valid_descriptor_ids;
valid_descriptor_ids.clear();
valid_descriptor_ids.resize(draw_list.state.set_count);
uint32_t valid_set_count = 0;
uint32_t first_set_index = 0;
uint32_t last_set_index = 0;
bool found_first_set = false;

for (uint32_t i = 0; i < draw_list.state.set_count; i++) {
if (draw_list.state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}

if (!draw_list.state.sets[i].bound && !found_first_set) {
first_set_index = i;
found_first_set = true;
}
// Prepare descriptor sets if the API doesn't use pipeline barriers.
if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
draw_graph.add_draw_list_uniform_set_prepare_for_use(draw_list.state.pipeline_shader_driver_id, draw_list.state.sets[i].uniform_set_driver_id, i);
}
}

// Bind descriptor sets.
for (uint32_t i = first_set_index; i < draw_list.state.set_count; i++) {
if (draw_list.state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}

if (!draw_list.state.sets[i].bound) {
// Batch contiguous descriptor sets in a single call.
if (descriptor_set_batching) {
// All good, see if this requires re-binding.
if (i - last_set_index > 1) {
// If the descriptor sets are not contiguous, bind the previous ones and start a new batch.
draw_graph.add_draw_list_bind_uniform_sets(draw_list.state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);

first_set_index = i;
valid_set_count = 1;
valid_descriptor_ids[0] = draw_list.state.sets[i].uniform_set_driver_id;
} else {
// Otherwise, keep storing in the current batch.
valid_descriptor_ids[valid_set_count] = draw_list.state.sets[i].uniform_set_driver_id;
valid_set_count++;
}

UniformSet *uniform_set = uniform_set_owner.get_or_null(draw_list.state.sets[i].uniform_set);
_uniform_set_update_shared(uniform_set);
draw_graph.add_draw_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
draw_list.state.sets[i].bound = true;

last_set_index = i;
} else {
draw_graph.add_draw_list_bind_uniform_set(draw_list.state.pipeline_shader_driver_id, draw_list.state.sets[i].uniform_set_driver_id, i);
}
}
}

// Bind the remaining batch.
if (descriptor_set_batching && valid_set_count > 0) {
draw_graph.add_draw_list_bind_uniform_sets(draw_list.state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
}

if (p_use_indices) {
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_procedural_vertices > 0,
"Procedural vertices can't be used together with indices.");

ERR_FAIL_COND_MSG(!draw_list.validation.index_array_count,
"Draw command requested indices, but no index buffer was set.");

ERR_FAIL_COND_MSG(draw_list.validation.pipeline_uses_restart_indices != draw_list.validation.index_buffer_uses_restart_indices,
"The usage of restart indices in the index buffer does not match the render primitive in the pipeline.");
#endif
uint32_t to_draw = draw_list.validation.index_array_count;

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(to_draw < draw_list.validation.pipeline_primitive_minimum,
"Too few indices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(draw_list.validation.pipeline_primitive_minimum) + ").");

ERR_FAIL_COND_MSG((to_draw % draw_list.validation.pipeline_primitive_divisor) != 0,
"Index amount (" + itos(to_draw) + ") must be a multiple of the amount of indices required by the render primitive (" + itos(draw_list.validation.pipeline_primitive_divisor) + ").");
#endif

draw_graph.add_draw_list_draw_indexed(to_draw, p_instances, 0);
} else {
uint32_t to_draw;

if (p_procedural_vertices > 0) {
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(draw_list.validation.pipeline_vertex_format != INVALID_ID,
"Procedural vertices requested, but pipeline expects a vertex array.");
#endif
to_draw = p_procedural_vertices;
} else {
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(draw_list.validation.pipeline_vertex_format == INVALID_ID,
"Draw command lacks indices, but pipeline format does not use vertices.");
#endif
to_draw = draw_list.validation.vertex_array_size;
}

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(to_draw < draw_list.validation.pipeline_primitive_minimum,
"Too few vertices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(draw_list.validation.pipeline_primitive_minimum) + ").");

ERR_FAIL_COND_MSG((to_draw % draw_list.validation.pipeline_primitive_divisor) != 0,
"Vertex amount (" + itos(to_draw) + ") must be a multiple of the amount of vertices required by the render primitive (" + itos(draw_list.validation.pipeline_primitive_divisor) + ").");
#endif

draw_graph.add_draw_list_draw(to_draw, p_instances);
}

draw_list.state.draw_count++;
}
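
// Illustrative end-to-end draw sequence (hypothetical RIDs, error handling
// omitted):
//
//     RD::DrawListID dl = rd->draw_list_begin(framebuffer_rid);
//     rd->draw_list_bind_render_pipeline(dl, pipeline_rid);
//     rd->draw_list_bind_uniform_set(dl, uniform_set_rid, 0);
//     rd->draw_list_bind_vertex_array(dl, vertex_array_rid);
//     rd->draw_list_bind_index_array(dl, index_array_rid);
//     rd->draw_list_draw(dl, true);
//     rd->draw_list_end();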
4902
4903
void RenderingDevice::draw_list_draw_indirect(DrawListID p_list, bool p_use_indices, RID p_buffer, uint32_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

Buffer *buffer = storage_buffer_owner.get_or_null(p_buffer);
ERR_FAIL_NULL(buffer);

ERR_FAIL_COND_MSG(!buffer->usage.has_flag(RDD::BUFFER_USAGE_INDIRECT_BIT), "Buffer provided was not created to do indirect draws.");

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(!draw_list.validation.pipeline_active,
"No render pipeline was set before attempting to draw.");
if (draw_list.validation.pipeline_vertex_format != INVALID_ID) {
// Pipeline uses vertices, validate format.
ERR_FAIL_COND_MSG(draw_list.validation.vertex_format == INVALID_ID,
"No vertex array was bound, and render pipeline expects vertices.");
// Make sure format is right.
ERR_FAIL_COND_MSG(draw_list.validation.pipeline_vertex_format != draw_list.validation.vertex_format,
"The vertex format used to create the pipeline does not match the vertex format bound.");
}

if (draw_list.validation.pipeline_push_constant_size > 0) {
// Using push constants, check that they were supplied.
ERR_FAIL_COND_MSG(!draw_list.validation.pipeline_push_constant_supplied,
"The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
}
#endif

#ifdef DEBUG_ENABLED
for (uint32_t i = 0; i < draw_list.state.set_count; i++) {
if (draw_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

if (draw_list.state.sets[i].pipeline_expected_format != draw_list.state.sets[i].uniform_set_format) {
if (draw_list.state.sets[i].uniform_set_format == 0) {
ERR_FAIL_MSG(vformat("Uniforms required by the pipeline were never supplied for set (%d) at the time of drawing.", i));
} else if (uniform_set_owner.owns(draw_list.state.sets[i].uniform_set)) {
UniformSet *us = uniform_set_owner.get_or_null(draw_list.state.sets[i].uniform_set);
ERR_FAIL_MSG(vformat("Uniforms supplied for set (%d):\n%s\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n%s", i, _shader_uniform_debug(us->shader_id, us->shader_set), _shader_uniform_debug(draw_list.state.pipeline_shader)));
} else {
ERR_FAIL_MSG(vformat("Uniforms supplied for set (%s, which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n%s", i, _shader_uniform_debug(draw_list.state.pipeline_shader)));
}
}
}
#endif

// Prepare descriptor sets if the API doesn't use pipeline barriers.
if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
for (uint32_t i = 0; i < draw_list.state.set_count; i++) {
if (draw_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

draw_graph.add_draw_list_uniform_set_prepare_for_use(draw_list.state.pipeline_shader_driver_id, draw_list.state.sets[i].uniform_set_driver_id, i);
}
}

// Bind descriptor sets.
for (uint32_t i = 0; i < draw_list.state.set_count; i++) {
if (draw_list.state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}
if (!draw_list.state.sets[i].bound) {
// All good, see if this requires re-binding.
draw_graph.add_draw_list_bind_uniform_set(draw_list.state.pipeline_shader_driver_id, draw_list.state.sets[i].uniform_set_driver_id, i);

UniformSet *uniform_set = uniform_set_owner.get_or_null(draw_list.state.sets[i].uniform_set);
_uniform_set_update_shared(uniform_set);

draw_graph.add_draw_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);

draw_list.state.sets[i].bound = true;
}
}

if (p_use_indices) {
#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(!draw_list.validation.index_array_count,
"Draw command requested indices, but no index buffer was set.");

ERR_FAIL_COND_MSG(draw_list.validation.pipeline_uses_restart_indices != draw_list.validation.index_buffer_uses_restart_indices,
"The usage of restart indices in index buffer does not match the render primitive in the pipeline.");
#endif

ERR_FAIL_COND_MSG(p_offset + 20 > buffer->size, "Offset provided (+20) is past the end of buffer.");

draw_graph.add_draw_list_draw_indexed_indirect(buffer->driver_id, p_offset, p_draw_count, p_stride);
} else {
ERR_FAIL_COND_MSG(p_offset + 16 > buffer->size, "Offset provided (+16) is past the end of buffer.");

draw_graph.add_draw_list_draw_indirect(buffer->driver_id, p_offset, p_draw_count, p_stride);
}

draw_list.state.draw_count++;

if (buffer->draw_tracker != nullptr) {
draw_graph.add_draw_list_usage(buffer->draw_tracker, RDG::RESOURCE_USAGE_INDIRECT_BUFFER_READ);
}

_check_transfer_worker_buffer(buffer);
}

void RenderingDevice::draw_list_set_viewport(DrawListID p_list, const Rect2 &p_rect) {
ERR_FAIL_COND(!draw_list.active);

if (p_rect.get_area() == 0) {
return;
}

draw_list.viewport = p_rect;
draw_graph.add_draw_list_set_viewport(p_rect);
}

void RenderingDevice::draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

Rect2i rect = p_rect;
rect.position += draw_list.viewport.position;

rect = draw_list.viewport.intersection(rect);

if (rect.get_area() == 0) {
return;
}

draw_graph.add_draw_list_set_scissor(rect);
}

void RenderingDevice::draw_list_disable_scissor(DrawListID p_list) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!draw_list.active);

draw_graph.add_draw_list_set_scissor(draw_list.viewport);
}

uint32_t RenderingDevice::draw_list_get_current_pass() {
ERR_RENDER_THREAD_GUARD_V(0);

return draw_list_current_subpass;
}

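// Switching to the next subpass tears down the current draw list, records a
// "next subpass" command in the graph, and starts a fresh draw list that keeps
// only the previous viewport, so any pipeline and uniform sets bound before the
// switch must be re-bound by the caller.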
RenderingDevice::DrawListID RenderingDevice::draw_list_switch_to_next_pass() {
ERR_RENDER_THREAD_GUARD_V(INVALID_ID);

ERR_FAIL_COND_V(!draw_list.active, INVALID_FORMAT_ID);
ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, INVALID_FORMAT_ID);

draw_list_current_subpass++;

Rect2i viewport;
_draw_list_end(&viewport);

draw_graph.add_draw_list_next_subpass(RDD::COMMAND_BUFFER_TYPE_PRIMARY);

_draw_list_start(viewport);

return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

#ifndef DISABLE_DEPRECATED
Error RenderingDevice::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
ERR_FAIL_V_MSG(ERR_UNAVAILABLE, "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
#endif

void RenderingDevice::_draw_list_start(const Rect2i &p_viewport) {
draw_list.viewport = p_viewport;
draw_list.active = true;
}

void RenderingDevice::_draw_list_end(Rect2i *r_last_viewport) {
if (r_last_viewport) {
*r_last_viewport = draw_list.viewport;
}

draw_list = DrawList();
}

void RenderingDevice::draw_list_end() {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND_MSG(!draw_list.active, "Immediate draw list is already inactive.");

draw_graph.add_draw_list_end();

_draw_list_end();

for (uint32_t i = 0; i < draw_list_bound_textures.size(); i++) {
Texture *texture = texture_owner.get_or_null(draw_list_bound_textures[i]);
ERR_CONTINUE(!texture); // Should never happen.
if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
texture->bound = false;
}
if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
texture->bound = false;
}
}

draw_list_bound_textures.clear();
}

/***********************/
/**** COMPUTE LISTS ****/
/***********************/

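// Typical usage from the render thread (a minimal sketch; the `rd` device and
// the pipeline/uniform set/push constant values are assumed to have been
// created beforehand through the regular RenderingDevice API):
//
//     ComputeListID list = rd->compute_list_begin();
//     rd->compute_list_bind_compute_pipeline(list, compute_pipeline);
//     rd->compute_list_bind_uniform_set(list, uniform_set, 0);
//     rd->compute_list_set_push_constant(list, &push_constant, sizeof(push_constant));
//     rd->compute_list_dispatch(list, groups_x, groups_y, groups_z);
//     rd->compute_list_end();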
RenderingDevice::ComputeListID RenderingDevice::compute_list_begin() {
ERR_RENDER_THREAD_GUARD_V(INVALID_ID);

ERR_FAIL_COND_V_MSG(compute_list.active, INVALID_ID, "Only one draw/compute list can be active at the same time.");

compute_list.active = true;

draw_graph.add_compute_list_begin();

return ID_TYPE_COMPUTE_LIST;
}

void RenderingDevice::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list.active);

const ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_compute_pipeline);
ERR_FAIL_NULL(pipeline);

if (p_compute_pipeline == compute_list.state.pipeline) {
return; // Redundant state, return.
}

compute_list.state.pipeline = p_compute_pipeline;

draw_graph.add_compute_list_bind_pipeline(pipeline->driver_id);

if (compute_list.state.pipeline_shader != pipeline->shader) {
// Shader changed, so descriptor sets may become incompatible.

uint32_t pcount = pipeline->set_formats.size(); // Formats count in this pipeline.
compute_list.state.set_count = MAX(compute_list.state.set_count, pcount);
const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats.

uint32_t first_invalid_set = UINT32_MAX; // All valid by default.
switch (driver->api_trait_get(RDD::API_TRAIT_SHADER_CHANGE_INVALIDATION)) {
case RDD::SHADER_CHANGE_INVALIDATION_ALL_BOUND_UNIFORM_SETS: {
first_invalid_set = 0;
} break;
case RDD::SHADER_CHANGE_INVALIDATION_INCOMPATIBLE_SETS_PLUS_CASCADE: {
for (uint32_t i = 0; i < pcount; i++) {
if (compute_list.state.sets[i].pipeline_expected_format != pformats[i]) {
first_invalid_set = i;
break;
}
}
} break;
case RDD::SHADER_CHANGE_INVALIDATION_ALL_OR_NONE_ACCORDING_TO_LAYOUT_HASH: {
if (compute_list.state.pipeline_shader_layout_hash != pipeline->shader_layout_hash) {
first_invalid_set = 0;
}
} break;
}

for (uint32_t i = 0; i < pcount; i++) {
compute_list.state.sets[i].bound = compute_list.state.sets[i].bound && i < first_invalid_set;
compute_list.state.sets[i].pipeline_expected_format = pformats[i];
}

for (uint32_t i = pcount; i < compute_list.state.set_count; i++) {
// Unbind the ones above (not used) if they exist.
compute_list.state.sets[i].bound = false;
}

compute_list.state.set_count = pcount; // Update set count.

if (pipeline->push_constant_size) {
#ifdef DEBUG_ENABLED
compute_list.validation.pipeline_push_constant_supplied = false;
#endif
}

compute_list.state.pipeline_shader = pipeline->shader;
compute_list.state.pipeline_shader_driver_id = pipeline->shader_driver_id;
compute_list.state.pipeline_shader_layout_hash = pipeline->shader_layout_hash;
compute_list.state.local_group_size[0] = pipeline->local_group_size[0];
compute_list.state.local_group_size[1] = pipeline->local_group_size[1];
compute_list.state.local_group_size[2] = pipeline->local_group_size[2];
}

#ifdef DEBUG_ENABLED
// Update compute pass pipeline info.
compute_list.validation.pipeline_active = true;
compute_list.validation.pipeline_push_constant_size = pipeline->push_constant_size;
#endif
}

void RenderingDevice::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list.active);

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_index >= driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS) || p_index >= MAX_UNIFORM_SETS,
"Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS)) + ").");
#endif

UniformSet *uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
ERR_FAIL_NULL(uniform_set);

if (p_index > compute_list.state.set_count) {
compute_list.state.set_count = p_index;
}

compute_list.state.sets[p_index].uniform_set_driver_id = uniform_set->driver_id; // Update set pointer.
compute_list.state.sets[p_index].bound = false; // Needs rebind.
compute_list.state.sets[p_index].uniform_set_format = uniform_set->format;
compute_list.state.sets[p_index].uniform_set = p_uniform_set;

#if 0
{ // Validate that textures bound are not attached as framebuffer bindings.
uint32_t attachable_count = uniform_set->attachable_textures.size();
const RID *attachable_ptr = uniform_set->attachable_textures.ptr();
uint32_t bound_count = draw_list_bound_textures.size();
const RID *bound_ptr = draw_list_bound_textures.ptr();
for (uint32_t i = 0; i < attachable_count; i++) {
for (uint32_t j = 0; j < bound_count; j++) {
ERR_FAIL_COND_MSG(attachable_ptr[i] == bound_ptr[j],
"Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed.");
}
}
}
#endif
}

void RenderingDevice::compute_list_set_push_constant(ComputeListID p_list, const void *p_data, uint32_t p_data_size) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list.active);
ERR_FAIL_COND_MSG(p_data_size > MAX_PUSH_CONSTANT_SIZE, "Push constants can't be bigger than 128 bytes to maintain compatibility.");

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_data_size != compute_list.validation.pipeline_push_constant_size,
"This compute pipeline requires (" + itos(compute_list.validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
#endif

draw_graph.add_compute_list_set_push_constant(compute_list.state.pipeline_shader_driver_id, p_data, p_data_size);

// Store it in the state in case we need to restart the compute list.
memcpy(compute_list.state.push_constant_data, p_data, p_data_size);
compute_list.state.push_constant_size = p_data_size;

#ifdef DEBUG_ENABLED
compute_list.validation.pipeline_push_constant_supplied = true;
#endif
}

void RenderingDevice::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list.active);

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_x_groups == 0, "Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is zero.");
ERR_FAIL_COND_MSG(p_y_groups == 0, "Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is zero.");
ERR_FAIL_COND_MSG(p_z_groups == 0, "Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is zero.");
ERR_FAIL_COND_MSG(p_x_groups > driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X),
"Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is larger than device limit (" + itos(driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X)) + ")");
ERR_FAIL_COND_MSG(p_y_groups > driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y),
"Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is larger than device limit (" + itos(driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y)) + ")");
ERR_FAIL_COND_MSG(p_z_groups > driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z),
"Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is larger than device limit (" + itos(driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z)) + ")");
#endif

#ifdef DEBUG_ENABLED

ERR_FAIL_COND_MSG(!compute_list.validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");

if (compute_list.validation.pipeline_push_constant_size > 0) {
// Using push constants, check that they were supplied.
ERR_FAIL_COND_MSG(!compute_list.validation.pipeline_push_constant_supplied,
"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
}

#endif

#ifdef DEBUG_ENABLED
for (uint32_t i = 0; i < compute_list.state.set_count; i++) {
if (compute_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

if (compute_list.state.sets[i].pipeline_expected_format != compute_list.state.sets[i].uniform_set_format) {
if (compute_list.state.sets[i].uniform_set_format == 0) {
ERR_FAIL_MSG("Uniforms required by the pipeline were never supplied for set (" + itos(i) + ") at the time of dispatching.");
} else if (uniform_set_owner.owns(compute_list.state.sets[i].uniform_set)) {
UniformSet *us = uniform_set_owner.get_or_null(compute_list.state.sets[i].uniform_set);
ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(compute_list.state.pipeline_shader));
} else {
ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(compute_list.state.pipeline_shader));
}
}
}
#endif
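
// Uniform sets are rebound in contiguous batches: consecutive dirty set indices
// are accumulated into `valid_descriptor_ids` and flushed with a single
// add_compute_list_bind_uniform_sets() call, which lets the backend bind them
// with one driver call instead of one call per set.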
thread_local LocalVector<RDD::UniformSetID> valid_descriptor_ids;
valid_descriptor_ids.clear();
valid_descriptor_ids.resize(compute_list.state.set_count);

uint32_t valid_set_count = 0;
uint32_t first_set_index = 0;
uint32_t last_set_index = 0;
bool found_first_set = false;

for (uint32_t i = 0; i < compute_list.state.set_count; i++) {
if (compute_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

if (!compute_list.state.sets[i].bound && !found_first_set) {
first_set_index = i;
found_first_set = true;
}
// Prepare descriptor sets if the API doesn't use pipeline barriers.
if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
draw_graph.add_compute_list_uniform_set_prepare_for_use(compute_list.state.pipeline_shader_driver_id, compute_list.state.sets[i].uniform_set_driver_id, i);
}
}

// Bind descriptor sets.
for (uint32_t i = first_set_index; i < compute_list.state.set_count; i++) {
if (compute_list.state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}

if (!compute_list.state.sets[i].bound) {
// Descriptor set batching.
if (descriptor_set_batching) {
// All good, see if this requires re-binding.
if (i - last_set_index > 1) {
// If the descriptor sets are not contiguous, bind the previous ones and start a new batch.
draw_graph.add_compute_list_bind_uniform_sets(compute_list.state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);

first_set_index = i;
valid_set_count = 1;
valid_descriptor_ids[0] = compute_list.state.sets[i].uniform_set_driver_id;
} else {
// Otherwise, keep storing in the current batch.
valid_descriptor_ids[valid_set_count] = compute_list.state.sets[i].uniform_set_driver_id;
valid_set_count++;
}

last_set_index = i;
} else {
draw_graph.add_compute_list_bind_uniform_set(compute_list.state.pipeline_shader_driver_id, compute_list.state.sets[i].uniform_set_driver_id, i);
}
UniformSet *uniform_set = uniform_set_owner.get_or_null(compute_list.state.sets[i].uniform_set);
_uniform_set_update_shared(uniform_set);

draw_graph.add_compute_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
compute_list.state.sets[i].bound = true;
}
}

// Bind the remaining batch.
if (valid_set_count > 0) {
draw_graph.add_compute_list_bind_uniform_sets(compute_list.state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
}
draw_graph.add_compute_list_dispatch(p_x_groups, p_y_groups, p_z_groups);
compute_list.state.dispatch_count++;
}

void RenderingDevice::compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list.active);

#ifdef DEBUG_ENABLED
ERR_FAIL_COND_MSG(p_x_threads == 0, "Dispatch amount of X compute threads (" + itos(p_x_threads) + ") is zero.");
ERR_FAIL_COND_MSG(p_y_threads == 0, "Dispatch amount of Y compute threads (" + itos(p_y_threads) + ") is zero.");
ERR_FAIL_COND_MSG(p_z_threads == 0, "Dispatch amount of Z compute threads (" + itos(p_z_threads) + ") is zero.");
#endif

#ifdef DEBUG_ENABLED

ERR_FAIL_COND_MSG(!compute_list.validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");

if (compute_list.validation.pipeline_push_constant_size > 0) {
// Using push constants, check that they were supplied.
ERR_FAIL_COND_MSG(!compute_list.validation.pipeline_push_constant_supplied,
"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
}

#endif

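// Thread counts are converted to workgroup counts by dividing by the pipeline's
// local group size and rounding up. For example, with an 8x8x1 local group,
// dispatching 1920x1080x1 threads issues 240x135x1 groups; when the division is
// not exact, the extra invocations must be discarded by bounds checks in the shader.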
compute_list_dispatch(p_list, Math::division_round_up(p_x_threads, compute_list.state.local_group_size[0]), Math::division_round_up(p_y_threads, compute_list.state.local_group_size[1]), Math::division_round_up(p_z_threads, compute_list.state.local_group_size[2]));
}

void RenderingDevice::compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset) {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list.active);

Buffer *buffer = storage_buffer_owner.get_or_null(p_buffer);
ERR_FAIL_NULL(buffer);

ERR_FAIL_COND_MSG(!buffer->usage.has_flag(RDD::BUFFER_USAGE_INDIRECT_BIT), "Buffer provided was not created to do indirect dispatch.");

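// An indirect dispatch command is three 32-bit group counts (12 bytes, as in
// Vulkan's VkDispatchIndirectCommand), hence the +12 bound check below.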
ERR_FAIL_COND_MSG(p_offset + 12 > buffer->size, "Offset provided (+12) is past the end of buffer.");

#ifdef DEBUG_ENABLED

ERR_FAIL_COND_MSG(!compute_list.validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");

if (compute_list.validation.pipeline_push_constant_size > 0) {
// Using push constants, check that they were supplied.
ERR_FAIL_COND_MSG(!compute_list.validation.pipeline_push_constant_supplied,
"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
}

#endif

#ifdef DEBUG_ENABLED
for (uint32_t i = 0; i < compute_list.state.set_count; i++) {
if (compute_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

if (compute_list.state.sets[i].pipeline_expected_format != compute_list.state.sets[i].uniform_set_format) {
if (compute_list.state.sets[i].uniform_set_format == 0) {
ERR_FAIL_MSG("Uniforms required by the pipeline were never supplied for set (" + itos(i) + ") at the time of dispatching.");
} else if (uniform_set_owner.owns(compute_list.state.sets[i].uniform_set)) {
UniformSet *us = uniform_set_owner.get_or_null(compute_list.state.sets[i].uniform_set);
ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(compute_list.state.pipeline_shader));
} else {
ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(compute_list.state.pipeline_shader));
}
}
}
#endif
thread_local LocalVector<RDD::UniformSetID> valid_descriptor_ids;
valid_descriptor_ids.clear();
valid_descriptor_ids.resize(compute_list.state.set_count);

uint32_t valid_set_count = 0;
uint32_t first_set_index = 0;
uint32_t last_set_index = 0;
bool found_first_set = false;

for (uint32_t i = 0; i < compute_list.state.set_count; i++) {
if (compute_list.state.sets[i].pipeline_expected_format == 0) {
// Nothing expected by this pipeline.
continue;
}

if (!compute_list.state.sets[i].bound && !found_first_set) {
first_set_index = i;
found_first_set = true;
}

// Prepare descriptor sets if the API doesn't use pipeline barriers.
if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
draw_graph.add_compute_list_uniform_set_prepare_for_use(compute_list.state.pipeline_shader_driver_id, compute_list.state.sets[i].uniform_set_driver_id, i);
}
}

// Bind descriptor sets.
for (uint32_t i = first_set_index; i < compute_list.state.set_count; i++) {
if (compute_list.state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}

if (!compute_list.state.sets[i].bound) {
// All good, see if this requires re-binding.
if (i - last_set_index > 1) {
// If the descriptor sets are not contiguous, bind the previous ones and start a new batch.
draw_graph.add_compute_list_bind_uniform_sets(compute_list.state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);

first_set_index = i;
valid_set_count = 1;
valid_descriptor_ids[0] = compute_list.state.sets[i].uniform_set_driver_id;
} else {
// Otherwise, keep storing in the current batch.
valid_descriptor_ids[valid_set_count] = compute_list.state.sets[i].uniform_set_driver_id;
valid_set_count++;
}

last_set_index = i;

UniformSet *uniform_set = uniform_set_owner.get_or_null(compute_list.state.sets[i].uniform_set);
_uniform_set_update_shared(uniform_set);

draw_graph.add_compute_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
compute_list.state.sets[i].bound = true;
}
}

// Bind the remaining batch.
if (valid_set_count > 0) {
draw_graph.add_compute_list_bind_uniform_sets(compute_list.state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
}

draw_graph.add_compute_list_dispatch_indirect(buffer->driver_id, p_offset);
compute_list.state.dispatch_count++;

if (buffer->draw_tracker != nullptr) {
draw_graph.add_compute_list_usage(buffer->draw_tracker, RDG::RESOURCE_USAGE_INDIRECT_BUFFER_READ);
}

_check_transfer_worker_buffer(buffer);
}

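// A compute barrier is emulated rather than recorded directly: the current
// compute list is ended and a new one begun immediately, after which the
// captured pipeline, uniform sets and push constant are restored so the caller
// can keep dispatching transparently.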
void RenderingDevice::compute_list_add_barrier(ComputeListID p_list) {
ERR_RENDER_THREAD_GUARD();

compute_list_barrier_state = compute_list.state;
compute_list_end();
compute_list_begin();

if (compute_list_barrier_state.pipeline.is_valid()) {
compute_list_bind_compute_pipeline(p_list, compute_list_barrier_state.pipeline);
}

for (uint32_t i = 0; i < compute_list_barrier_state.set_count; i++) {
if (compute_list_barrier_state.sets[i].uniform_set.is_valid()) {
compute_list_bind_uniform_set(p_list, compute_list_barrier_state.sets[i].uniform_set, i);
}
}

if (compute_list_barrier_state.push_constant_size > 0) {
compute_list_set_push_constant(p_list, compute_list_barrier_state.push_constant_data, compute_list_barrier_state.push_constant_size);
}
}

void RenderingDevice::compute_list_end() {
ERR_RENDER_THREAD_GUARD();

ERR_FAIL_COND(!compute_list.active);

draw_graph.add_compute_list_end();

compute_list = ComputeList();
}

#ifndef DISABLE_DEPRECATED
void RenderingDevice::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) {
WARN_PRINT("Deprecated. Barriers are automatically inserted by RenderingDevice.");
}

void RenderingDevice::full_barrier() {
WARN_PRINT("Deprecated. Barriers are automatically inserted by RenderingDevice.");
}
#endif

/*************************/
/**** TRANSFER WORKER ****/
/*************************/

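// Returns the padding needed to bring `p_offset` up to the next multiple of
// `p_required_align` (0 if it is already aligned). For example, an offset of
// 100 with a required alignment of 64 yields 28, since 100 + 28 = 128.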
static uint32_t _get_alignment_offset(uint32_t p_offset, uint32_t p_required_align) {
uint32_t alignment_offset = (p_required_align > 0) ? (p_offset % p_required_align) : 0;
if (alignment_offset != 0) {
// If a particular alignment is required, add the offset as part of the required size.
alignment_offset = p_required_align - alignment_offset;
}

return alignment_offset;
}

RenderingDevice::TransferWorker *RenderingDevice::_acquire_transfer_worker(uint32_t p_transfer_size, uint32_t p_required_align, uint32_t &r_staging_offset) {
// Find the first worker that is not currently executing anything and has enough size for the transfer.
// If no workers are available, we make a new one. If we're not allowed to make new ones, we wait until one of them is available.
TransferWorker *transfer_worker = nullptr;
uint32_t available_list_index = 0;
bool transfer_worker_busy = true;
bool transfer_worker_full = true;
{
MutexLock pool_lock(transfer_worker_pool_mutex);

// If no workers are available and we've reached the max pool capacity, wait until one of them becomes available.
bool transfer_worker_pool_full = transfer_worker_pool.size() >= transfer_worker_pool_max_size;
while (transfer_worker_pool_available_list.is_empty() && transfer_worker_pool_full) {
transfer_worker_pool_condition.wait(pool_lock);
}

// Look at all available workers first.
for (uint32_t i = 0; i < transfer_worker_pool_available_list.size(); i++) {
uint32_t worker_index = transfer_worker_pool_available_list[i];
TransferWorker *candidate_worker = transfer_worker_pool[worker_index];
candidate_worker->thread_mutex.lock();

// Figure out if the worker can fit the transfer.
uint32_t alignment_offset = _get_alignment_offset(candidate_worker->staging_buffer_size_in_use, p_required_align);
uint32_t required_size = candidate_worker->staging_buffer_size_in_use + p_transfer_size + alignment_offset;
bool candidate_worker_busy = candidate_worker->submitted;
bool candidate_worker_full = required_size > candidate_worker->staging_buffer_size_allocated;
bool pick_candidate = false;
if (!candidate_worker_busy && !candidate_worker_full) {
// A worker that can fit the transfer and is not waiting for a previous execution is the best possible candidate.
pick_candidate = true;
} else if (!candidate_worker_busy) {
// The worker can't fit the transfer but it's not currently doing anything.
// We pick it as a possible candidate if the current one is busy.
pick_candidate = transfer_worker_busy;
} else if (!candidate_worker_full) {
// The worker can fit the transfer but it's currently executing previous work.
// We pick it as a possible candidate if the current one is both busy and full.
pick_candidate = transfer_worker_busy && transfer_worker_full;
} else if (transfer_worker == nullptr) {
// The worker can't fit the transfer and it's currently executing work, so it's the worst candidate.
// We only pick it if no candidate has been picked yet.
pick_candidate = true;
}

if (pick_candidate) {
if (transfer_worker != nullptr) {
// Release the lock for the worker that was picked previously.
transfer_worker->thread_mutex.unlock();
}

// Keep the lock active for this worker.
transfer_worker = candidate_worker;
transfer_worker_busy = candidate_worker_busy;
transfer_worker_full = candidate_worker_full;
available_list_index = i;

if (!transfer_worker_busy && !transfer_worker_full) {
// Best possible candidate, stop searching early.
break;
}
} else {
// Release the lock for the candidate.
candidate_worker->thread_mutex.unlock();
}
}

if (transfer_worker != nullptr) {
// A worker was picked, remove it from the available list.
transfer_worker_pool_available_list.remove_at(available_list_index);
} else {
DEV_ASSERT(!transfer_worker_pool_full && "A transfer worker should never be created when the pool is full.");

// No existing worker was picked, we create a new one.
transfer_worker = memnew(TransferWorker);
transfer_worker->command_fence = driver->fence_create();
transfer_worker->command_pool = driver->command_pool_create(transfer_queue_family, RDD::COMMAND_BUFFER_TYPE_PRIMARY);
transfer_worker->command_buffer = driver->command_buffer_create(transfer_worker->command_pool);
transfer_worker->index = transfer_worker_pool.size();
transfer_worker_pool.push_back(transfer_worker);
transfer_worker_operation_used_by_draw.push_back(0);
transfer_worker->thread_mutex.lock();
}
}

if (transfer_worker->submitted) {
// Wait for the worker if the command buffer was submitted but it hasn't finished processing yet.
_wait_for_transfer_worker(transfer_worker);
}

uint32_t alignment_offset = _get_alignment_offset(transfer_worker->staging_buffer_size_in_use, p_required_align);
transfer_worker->max_transfer_size = MAX(transfer_worker->max_transfer_size, p_transfer_size);

uint32_t required_size = transfer_worker->staging_buffer_size_in_use + p_transfer_size + alignment_offset;
if (required_size > transfer_worker->staging_buffer_size_allocated) {
// If there aren't enough bytes left on the staging buffer, we submit everything pending from the worker and wait for the work to be finished.
if (transfer_worker->recording) {
_end_transfer_worker(transfer_worker);
_submit_transfer_worker(transfer_worker);
}

if (transfer_worker->submitted) {
_wait_for_transfer_worker(transfer_worker);
}

alignment_offset = 0;

// If the staging buffer can't fit the transfer, we recreate the buffer.
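// The new size is the next power of two of the largest transfer seen so far,
// with a 16 KiB floor, so repeated small transfers don't trigger constant
// reallocations and the buffer size converges quickly for large uploads.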
const uint32_t expected_buffer_size_minimum = 16 * 1024;
uint32_t expected_buffer_size = MAX(transfer_worker->max_transfer_size, expected_buffer_size_minimum);
if (expected_buffer_size > transfer_worker->staging_buffer_size_allocated) {
if (transfer_worker->staging_buffer.id != 0) {
driver->buffer_free(transfer_worker->staging_buffer);
}

uint32_t new_staging_buffer_size = next_power_of_2(expected_buffer_size);
transfer_worker->staging_buffer_size_allocated = new_staging_buffer_size;
transfer_worker->staging_buffer = driver->buffer_create(new_staging_buffer_size, RDD::BUFFER_USAGE_TRANSFER_FROM_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
}
}

// Add the alignment before storing the offset that will be returned.
transfer_worker->staging_buffer_size_in_use += alignment_offset;

// Store the offset to return and increment the current size.
r_staging_offset = transfer_worker->staging_buffer_size_in_use;
transfer_worker->staging_buffer_size_in_use += p_transfer_size;

if (!transfer_worker->recording) {
// Begin the command buffer if the worker wasn't recording yet.
driver->command_buffer_begin(transfer_worker->command_buffer);
transfer_worker->recording = true;
}

return transfer_worker;
}

void RenderingDevice::_release_transfer_worker(TransferWorker *p_transfer_worker) {
p_transfer_worker->thread_mutex.unlock();

transfer_worker_pool_mutex.lock();
transfer_worker_pool_available_list.push_back(p_transfer_worker->index);
transfer_worker_pool_mutex.unlock();
transfer_worker_pool_condition.notify_one();
}

void RenderingDevice::_end_transfer_worker(TransferWorker *p_transfer_worker) {
driver->command_buffer_end(p_transfer_worker->command_buffer);
p_transfer_worker->recording = false;
}

void RenderingDevice::_submit_transfer_worker(TransferWorker *p_transfer_worker, VectorView<RDD::SemaphoreID> p_signal_semaphores) {
driver->command_queue_execute_and_present(transfer_queue, {}, p_transfer_worker->command_buffer, p_signal_semaphores, p_transfer_worker->command_fence, {});

for (uint32_t i = 0; i < p_signal_semaphores.size(); i++) {
// Indicate the frame should wait on these semaphores before executing the main command buffer.
frames[frame].semaphores_to_wait_on.push_back(p_signal_semaphores[i]);
}

p_transfer_worker->submitted = true;

{
MutexLock lock(p_transfer_worker->operations_mutex);
p_transfer_worker->operations_submitted = p_transfer_worker->operations_counter;
}
}

void RenderingDevice::_wait_for_transfer_worker(TransferWorker *p_transfer_worker) {
driver->fence_wait(p_transfer_worker->command_fence);
driver->command_pool_reset(p_transfer_worker->command_pool);
p_transfer_worker->staging_buffer_size_in_use = 0;
p_transfer_worker->submitted = false;

{
MutexLock lock(p_transfer_worker->operations_mutex);
p_transfer_worker->operations_processed = p_transfer_worker->operations_submitted;
}

_flush_barriers_for_transfer_worker(p_transfer_worker);
}

void RenderingDevice::_flush_barriers_for_transfer_worker(TransferWorker *p_transfer_worker) {
// Caller must have already acquired the mutex for the worker.
if (!p_transfer_worker->texture_barriers.is_empty()) {
MutexLock transfer_worker_lock(transfer_worker_pool_texture_barriers_mutex);
for (uint32_t i = 0; i < p_transfer_worker->texture_barriers.size(); i++) {
transfer_worker_pool_texture_barriers.push_back(p_transfer_worker->texture_barriers[i]);
}

p_transfer_worker->texture_barriers.clear();
}
}

void RenderingDevice::_check_transfer_worker_operation(uint32_t p_transfer_worker_index, uint64_t p_transfer_worker_operation) {
TransferWorker *transfer_worker = transfer_worker_pool[p_transfer_worker_index];
MutexLock lock(transfer_worker->operations_mutex);
uint64_t &dst_operation = transfer_worker_operation_used_by_draw[transfer_worker->index];
dst_operation = MAX(dst_operation, p_transfer_worker_operation);
}

void RenderingDevice::_check_transfer_worker_buffer(Buffer *p_buffer) {
if (p_buffer->transfer_worker_index >= 0) {
_check_transfer_worker_operation(p_buffer->transfer_worker_index, p_buffer->transfer_worker_operation);
p_buffer->transfer_worker_index = -1;
}
}

void RenderingDevice::_check_transfer_worker_texture(Texture *p_texture) {
if (p_texture->transfer_worker_index >= 0) {
_check_transfer_worker_operation(p_texture->transfer_worker_index, p_texture->transfer_worker_operation);
p_texture->transfer_worker_index = -1;
}
}

void RenderingDevice::_check_transfer_worker_vertex_array(VertexArray *p_vertex_array) {
if (!p_vertex_array->transfer_worker_indices.is_empty()) {
for (int i = 0; i < p_vertex_array->transfer_worker_indices.size(); i++) {
_check_transfer_worker_operation(p_vertex_array->transfer_worker_indices[i], p_vertex_array->transfer_worker_operations[i]);
}

p_vertex_array->transfer_worker_indices.clear();
p_vertex_array->transfer_worker_operations.clear();
}
}

void RenderingDevice::_check_transfer_worker_index_array(IndexArray *p_index_array) {
if (p_index_array->transfer_worker_index >= 0) {
_check_transfer_worker_operation(p_index_array->transfer_worker_index, p_index_array->transfer_worker_operation);
p_index_array->transfer_worker_index = -1;
}
}

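// Submits every worker that recorded transfer commands this frame. When a draw
// command buffer is provided, each submitted worker signals a per-worker
// semaphore that the frame's main command buffer waits on, and workers whose
// operations were already processed are skipped entirely.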
void RenderingDevice::_submit_transfer_workers(RDD::CommandBufferID p_draw_command_buffer) {
MutexLock transfer_worker_lock(transfer_worker_pool_mutex);
for (uint32_t i = 0; i < transfer_worker_pool.size(); i++) {
TransferWorker *worker = transfer_worker_pool[i];
if (p_draw_command_buffer) {
MutexLock lock(worker->operations_mutex);
if (worker->operations_processed >= transfer_worker_operation_used_by_draw[worker->index]) {
// The operation used by the draw has already been processed, we don't need to wait on the worker.
continue;
}
}

{
MutexLock lock(worker->thread_mutex);
if (worker->recording) {
VectorView<RDD::SemaphoreID> semaphores = p_draw_command_buffer ? frames[frame].transfer_worker_semaphores[i] : VectorView<RDD::SemaphoreID>();
_end_transfer_worker(worker);
_submit_transfer_worker(worker, semaphores);
}

if (p_draw_command_buffer) {
_flush_barriers_for_transfer_worker(worker);
}
}
}
}

void RenderingDevice::_submit_transfer_barriers(RDD::CommandBufferID p_draw_command_buffer) {
MutexLock transfer_worker_lock(transfer_worker_pool_texture_barriers_mutex);
if (!transfer_worker_pool_texture_barriers.is_empty()) {
driver->command_pipeline_barrier(p_draw_command_buffer, RDD::PIPELINE_STAGE_COPY_BIT, RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT, {}, {}, transfer_worker_pool_texture_barriers);
transfer_worker_pool_texture_barriers.clear();
}
}

void RenderingDevice::_wait_for_transfer_workers() {
MutexLock transfer_worker_lock(transfer_worker_pool_mutex);
for (TransferWorker *worker : transfer_worker_pool) {
MutexLock lock(worker->thread_mutex);
if (worker->submitted) {
_wait_for_transfer_worker(worker);
}
}
}

void RenderingDevice::_free_transfer_workers() {
MutexLock transfer_worker_lock(transfer_worker_pool_mutex);
for (TransferWorker *worker : transfer_worker_pool) {
driver->fence_free(worker->command_fence);
driver->buffer_free(worker->staging_buffer);
driver->command_pool_free(worker->command_pool);
memdelete(worker);
}

transfer_worker_pool.clear();
}

/***********************/
/**** COMMAND GRAPH ****/
/***********************/

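// Making a resource "mutable" attaches an RDG::ResourceTracker to it so the
// command graph can follow its usage and insert the required barriers. For
// textures this is reference-counted: shared textures reuse the owner's
// tracker, while slices get a per-rectangle tracker parented to the owner's.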
bool RenderingDevice::_texture_make_mutable(Texture *p_texture, RID p_texture_id) {
if (p_texture->draw_tracker != nullptr) {
// Texture already has a tracker.
return false;
} else {
if (p_texture->owner.is_valid()) {
// Texture has an owner.
Texture *owner_texture = texture_owner.get_or_null(p_texture->owner);
ERR_FAIL_NULL_V(owner_texture, false);

if (owner_texture->draw_tracker != nullptr) {
// Create a tracker for this dependency in particular.
if (p_texture->slice_type == TEXTURE_SLICE_MAX) {
// Shared texture.
p_texture->draw_tracker = owner_texture->draw_tracker;
p_texture->draw_tracker->reference_count++;
} else {
// Slice texture.
if (owner_texture->slice_trackers == nullptr) {
owner_texture->slice_trackers = memnew((HashMap<Rect2i, RDG::ResourceTracker *>));
}
HashMap<Rect2i, RDG::ResourceTracker *>::ConstIterator draw_tracker_iterator = owner_texture->slice_trackers->find(p_texture->slice_rect);
RDG::ResourceTracker *draw_tracker = nullptr;
if (draw_tracker_iterator != owner_texture->slice_trackers->end()) {
// Reuse the tracker at the matching rectangle.
draw_tracker = draw_tracker_iterator->value;
} else {
// Create a new tracker and store it on the map.
draw_tracker = RDG::resource_tracker_create();
draw_tracker->parent = owner_texture->draw_tracker;
draw_tracker->texture_driver_id = p_texture->driver_id;
draw_tracker->texture_size = Size2i(p_texture->width, p_texture->height);
draw_tracker->texture_subresources = p_texture->barrier_range();
draw_tracker->texture_usage = p_texture->usage_flags;
draw_tracker->texture_slice_or_dirty_rect = p_texture->slice_rect;
(*owner_texture->slice_trackers)[p_texture->slice_rect] = draw_tracker;
}

p_texture->draw_tracker = draw_tracker;
p_texture->draw_tracker->reference_count++;
}

if (p_texture_id.is_valid()) {
_dependencies_make_mutable(p_texture_id, p_texture->draw_tracker);
}
} else {
// Delegate this to the owner instead, as it'll make all its dependencies mutable.
_texture_make_mutable(owner_texture, p_texture->owner);
}
} else {
// Regular texture.
p_texture->draw_tracker = RDG::resource_tracker_create();
p_texture->draw_tracker->texture_driver_id = p_texture->driver_id;
p_texture->draw_tracker->texture_size = Size2i(p_texture->width, p_texture->height);
p_texture->draw_tracker->texture_subresources = p_texture->barrier_range();
p_texture->draw_tracker->texture_usage = p_texture->usage_flags;
p_texture->draw_tracker->is_discardable = p_texture->is_discardable;
p_texture->draw_tracker->reference_count = 1;

if (p_texture_id.is_valid()) {
if (p_texture->has_initial_data) {
// If the texture was initialized with initial data but wasn't made mutable from the start, assume the texture sampling usage.
p_texture->draw_tracker->usage = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
}

_dependencies_make_mutable(p_texture_id, p_texture->draw_tracker);
}
}

return true;
}
}

bool RenderingDevice::_buffer_make_mutable(Buffer *p_buffer, RID p_buffer_id) {
if (p_buffer->draw_tracker != nullptr) {
// Buffer already has a tracker.
return false;
} else {
// Create a tracker for the buffer and make all its dependencies mutable.
p_buffer->draw_tracker = RDG::resource_tracker_create();
p_buffer->draw_tracker->buffer_driver_id = p_buffer->driver_id;
if (p_buffer_id.is_valid()) {
_dependencies_make_mutable(p_buffer_id, p_buffer->draw_tracker);
}

return true;
}
}

bool RenderingDevice::_vertex_array_make_mutable(VertexArray *p_vertex_array, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
if (!p_vertex_array->untracked_buffers.has(p_resource_id)) {
// Vertex array thinks the buffer is already tracked or does not use it.
return false;
} else {
// Vertex array is aware of the buffer but it isn't being tracked.
p_vertex_array->draw_trackers.push_back(p_resource_tracker);
p_vertex_array->untracked_buffers.erase(p_resource_id);
return true;
}
}

bool RenderingDevice::_index_array_make_mutable(IndexArray *p_index_array, RDG::ResourceTracker *p_resource_tracker) {
if (p_index_array->draw_tracker != nullptr) {
// Index array already has a tracker.
return false;
} else {
// Index array should assign the tracker from the buffer.
p_index_array->draw_tracker = p_resource_tracker;
return true;
}
}

bool RenderingDevice::_uniform_set_make_mutable(UniformSet *p_uniform_set, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
HashMap<RID, RDG::ResourceUsage>::Iterator E = p_uniform_set->untracked_usage.find(p_resource_id);
if (!E) {
// Uniform set thinks the resource is already tracked or does not use it.
return false;
} else {
// Uniform set has seen the resource but hasn't added its tracker yet.
p_uniform_set->draw_trackers.push_back(p_resource_tracker);
p_uniform_set->draw_trackers_usage.push_back(E->value);
p_uniform_set->untracked_usage.remove(E);
return true;
}
}

bool RenderingDevice::_dependency_make_mutable(RID p_id, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
if (texture_owner.owns(p_id)) {
Texture *texture = texture_owner.get_or_null(p_id);
return _texture_make_mutable(texture, p_id);
} else if (vertex_array_owner.owns(p_id)) {
VertexArray *vertex_array = vertex_array_owner.get_or_null(p_id);
return _vertex_array_make_mutable(vertex_array, p_resource_id, p_resource_tracker);
} else if (index_array_owner.owns(p_id)) {
IndexArray *index_array = index_array_owner.get_or_null(p_id);
return _index_array_make_mutable(index_array, p_resource_tracker);
} else if (uniform_set_owner.owns(p_id)) {
UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
return _uniform_set_make_mutable(uniform_set, p_resource_id, p_resource_tracker);
} else {
DEV_ASSERT(false && "Unknown resource type to make mutable.");
return false;
}
}

bool RenderingDevice::_dependencies_make_mutable_recursive(RID p_id, RDG::ResourceTracker *p_resource_tracker) {
bool made_mutable = false;
HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id);
if (E) {
for (RID rid : E->value) {
made_mutable = _dependency_make_mutable(rid, p_id, p_resource_tracker) || made_mutable;
}
}

return made_mutable;
}

bool RenderingDevice::_dependencies_make_mutable(RID p_id, RDG::ResourceTracker *p_resource_tracker) {
_THREAD_SAFE_METHOD_
return _dependencies_make_mutable_recursive(p_id, p_resource_tracker);
}

/**************************/
/**** FRAME MANAGEMENT ****/
/**************************/

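// Freeing is deferred: the resource is queued on the current frame's disposal
// lists and only destroyed once that frame index comes around again, which
// guarantees the GPU is no longer using it.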
void RenderingDevice::free_rid(RID p_rid) {
6042
ERR_RENDER_THREAD_GUARD();
6043
6044
_free_dependencies(p_rid); // Recursively erase dependencies first, to avoid potential API problems.
6045
_free_internal(p_rid);
6046
}
6047
6048
void RenderingDevice::_free_internal(RID p_id) {
6049
#ifdef DEV_ENABLED
6050
String resource_name;
6051
if (resource_names.has(p_id)) {
6052
resource_name = resource_names[p_id];
6053
resource_names.erase(p_id);
6054
}
6055
#endif
6056
6057
// Push everything so it's disposed of next time this frame index is processed (means, it's safe to do it).
6058
if (texture_owner.owns(p_id)) {
6059
Texture *texture = texture_owner.get_or_null(p_id);
6060
_check_transfer_worker_texture(texture);
6061
6062
RDG::ResourceTracker *draw_tracker = texture->draw_tracker;
6063
if (draw_tracker != nullptr) {
6064
draw_tracker->reference_count--;
6065
if (draw_tracker->reference_count == 0) {
6066
RDG::resource_tracker_free(draw_tracker);
6067
6068
if (texture->owner.is_valid() && (texture->slice_type != TEXTURE_SLICE_MAX)) {
6069
// If this was a texture slice, erase the tracker from the map.
6070
Texture *owner_texture = texture_owner.get_or_null(texture->owner);
6071
if (owner_texture != nullptr && owner_texture->slice_trackers != nullptr) {
6072
owner_texture->slice_trackers->erase(texture->slice_rect);
6073
6074
if (owner_texture->slice_trackers->is_empty()) {
6075
memdelete(owner_texture->slice_trackers);
6076
owner_texture->slice_trackers = nullptr;
6077
}
6078
}
6079
}
6080
}
6081
}
6082
6083
frames[frame].textures_to_dispose_of.push_back(*texture);
6084
texture_owner.free(p_id);
6085
} else if (framebuffer_owner.owns(p_id)) {
6086
Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id);
6087
frames[frame].framebuffers_to_dispose_of.push_back(*framebuffer);
6088
6089
if (framebuffer->invalidated_callback != nullptr) {
6090
framebuffer->invalidated_callback(framebuffer->invalidated_callback_userdata);
6091
}
6092
6093
framebuffer_owner.free(p_id);
6094
} else if (sampler_owner.owns(p_id)) {
6095
RDD::SamplerID sampler_driver_id = *sampler_owner.get_or_null(p_id);
6096
frames[frame].samplers_to_dispose_of.push_back(sampler_driver_id);
6097
sampler_owner.free(p_id);
6098
} else if (vertex_buffer_owner.owns(p_id)) {
6099
Buffer *vertex_buffer = vertex_buffer_owner.get_or_null(p_id);
6100
_check_transfer_worker_buffer(vertex_buffer);
6101
6102
RDG::resource_tracker_free(vertex_buffer->draw_tracker);
6103
frames[frame].buffers_to_dispose_of.push_back(*vertex_buffer);
6104
vertex_buffer_owner.free(p_id);
6105
} else if (vertex_array_owner.owns(p_id)) {
6106
vertex_array_owner.free(p_id);
6107
} else if (index_buffer_owner.owns(p_id)) {
6108
IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_id);
6109
_check_transfer_worker_buffer(index_buffer);
6110
6111
RDG::resource_tracker_free(index_buffer->draw_tracker);
6112
frames[frame].buffers_to_dispose_of.push_back(*index_buffer);
6113
index_buffer_owner.free(p_id);
6114
} else if (index_array_owner.owns(p_id)) {
6115
index_array_owner.free(p_id);
6116
} else if (shader_owner.owns(p_id)) {
6117
Shader *shader = shader_owner.get_or_null(p_id);
6118
if (shader->driver_id) { // Not placeholder?
6119
frames[frame].shaders_to_dispose_of.push_back(*shader);
6120
}
6121
shader_owner.free(p_id);
6122
} else if (uniform_buffer_owner.owns(p_id)) {
6123
Buffer *uniform_buffer = uniform_buffer_owner.get_or_null(p_id);
6124
_check_transfer_worker_buffer(uniform_buffer);
6125
6126
RDG::resource_tracker_free(uniform_buffer->draw_tracker);
6127
frames[frame].buffers_to_dispose_of.push_back(*uniform_buffer);
6128
uniform_buffer_owner.free(p_id);
6129
} else if (texture_buffer_owner.owns(p_id)) {
6130
Buffer *texture_buffer = texture_buffer_owner.get_or_null(p_id);
6131
_check_transfer_worker_buffer(texture_buffer);
6132
6133
RDG::resource_tracker_free(texture_buffer->draw_tracker);
6134
frames[frame].buffers_to_dispose_of.push_back(*texture_buffer);
6135
texture_buffer_owner.free(p_id);
6136
} else if (storage_buffer_owner.owns(p_id)) {
6137
Buffer *storage_buffer = storage_buffer_owner.get_or_null(p_id);
6138
_check_transfer_worker_buffer(storage_buffer);
6139
6140
RDG::resource_tracker_free(storage_buffer->draw_tracker);
6141
frames[frame].buffers_to_dispose_of.push_back(*storage_buffer);
6142
storage_buffer_owner.free(p_id);
6143
} else if (uniform_set_owner.owns(p_id)) {
6144
UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
6145
frames[frame].uniform_sets_to_dispose_of.push_back(*uniform_set);
6146
uniform_set_owner.free(p_id);
6147
6148
if (uniform_set->invalidated_callback != nullptr) {
6149
uniform_set->invalidated_callback(uniform_set->invalidated_callback_userdata);
6150
}
6151
} else if (render_pipeline_owner.owns(p_id)) {
6152
RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_id);
6153
frames[frame].render_pipelines_to_dispose_of.push_back(*pipeline);
6154
render_pipeline_owner.free(p_id);
6155
} else if (compute_pipeline_owner.owns(p_id)) {
6156
ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_id);
6157
frames[frame].compute_pipelines_to_dispose_of.push_back(*pipeline);
6158
compute_pipeline_owner.free(p_id);
6159
} else {
6160
#ifdef DEV_ENABLED
6161
ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()) + " " + resource_name);
6162
#else
6163
ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()));
6164
#endif
6165
}
6166
6167
frames_pending_resources_for_processing = uint32_t(frames.size());
6168
}
6169
6170
// The full list of resources that can be named is in the VkObjectType enum.
6171
// We just expose the resources that are owned and can be accessed easily.
6172
void RenderingDevice::set_resource_name(RID p_id, const String &p_name) {
6173
_THREAD_SAFE_METHOD_
6174
6175
if (texture_owner.owns(p_id)) {
6176
Texture *texture = texture_owner.get_or_null(p_id);
6177
driver->set_object_name(RDD::OBJECT_TYPE_TEXTURE, texture->driver_id, p_name);
6178
} else if (framebuffer_owner.owns(p_id)) {
6179
//Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id);
6180
// Not implemented for now as the relationship between Framebuffer and RenderPass is very complex.
6181
} else if (sampler_owner.owns(p_id)) {
6182
RDD::SamplerID sampler_driver_id = *sampler_owner.get_or_null(p_id);
6183
driver->set_object_name(RDD::OBJECT_TYPE_SAMPLER, sampler_driver_id, p_name);
6184
} else if (vertex_buffer_owner.owns(p_id)) {
6185
Buffer *vertex_buffer = vertex_buffer_owner.get_or_null(p_id);
6186
driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, vertex_buffer->driver_id, p_name);
6187
} else if (index_buffer_owner.owns(p_id)) {
6188
IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_id);
6189
driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, index_buffer->driver_id, p_name);
6190
} else if (shader_owner.owns(p_id)) {
6191
Shader *shader = shader_owner.get_or_null(p_id);
6192
driver->set_object_name(RDD::OBJECT_TYPE_SHADER, shader->driver_id, p_name);
6193
} else if (uniform_buffer_owner.owns(p_id)) {
6194
Buffer *uniform_buffer = uniform_buffer_owner.get_or_null(p_id);
6195
driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, uniform_buffer->driver_id, p_name);
6196
} else if (texture_buffer_owner.owns(p_id)) {
6197
Buffer *texture_buffer = texture_buffer_owner.get_or_null(p_id);
6198
driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, texture_buffer->driver_id, p_name);
6199
} else if (storage_buffer_owner.owns(p_id)) {
6200
Buffer *storage_buffer = storage_buffer_owner.get_or_null(p_id);
6201
driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, storage_buffer->driver_id, p_name);
6202
} else if (uniform_set_owner.owns(p_id)) {
6203
UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
6204
driver->set_object_name(RDD::OBJECT_TYPE_UNIFORM_SET, uniform_set->driver_id, p_name);
6205
} else if (render_pipeline_owner.owns(p_id)) {
6206
RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_id);
6207
driver->set_object_name(RDD::OBJECT_TYPE_PIPELINE, pipeline->driver_id, p_name);
6208
} else if (compute_pipeline_owner.owns(p_id)) {
6209
ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_id);
6210
driver->set_object_name(RDD::OBJECT_TYPE_PIPELINE, pipeline->driver_id, p_name);
6211
} else {
6212
ERR_PRINT("Attempted to name invalid ID: " + itos(p_id.get_id()));
6213
return;
6214
}
6215
#ifdef DEV_ENABLED
6216
resource_names[p_id] = p_name;
6217
#endif
6218
}
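// A minimal usage sketch (not part of the engine): giving a resource a readable name
// so it shows up labeled in graphics debuggers and validation-layer messages. `rd` and
// `texture_rid` are assumed to be a valid RenderingDevice pointer and texture RID.
//
//   rd->set_resource_name(texture_rid, "GI Cascade 0 Albedo");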
6219
6220
void RenderingDevice::_draw_command_begin_label(String p_label_name, const Color &p_color) {
6221
draw_command_begin_label(p_label_name.utf8().span(), p_color);
6222
}
6223
6224
void RenderingDevice::draw_command_begin_label(const Span<char> p_label_name, const Color &p_color) {
6225
ERR_RENDER_THREAD_GUARD();
6226
6227
if (!context->is_debug_utils_enabled()) {
6228
return;
6229
}
6230
6231
draw_graph.begin_label(p_label_name, p_color);
6232
}
6233
6234
#ifndef DISABLE_DEPRECATED
6235
void RenderingDevice::draw_command_insert_label(String p_label_name, const Color &p_color) {
6236
WARN_PRINT("Deprecated. Inserting labels no longer applies due to command reordering.");
6237
}
6238
#endif
6239
6240
void RenderingDevice::draw_command_end_label() {
6241
ERR_RENDER_THREAD_GUARD();
6242
6243
draw_graph.end_label();
6244
}
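// A minimal usage sketch (not part of the engine): wrapping a group of commands in a
// begin/end label pair so they appear as a named, colored region in tools such as
// RenderDoc. `rd` is assumed to be a valid RenderingDevice pointer.
//
//   rd->draw_command_begin_label(String("Shadow Pass").utf8().span(), Color(1, 0, 0));
//   // ... record draw and compute lists here ...
//   rd->draw_command_end_label();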
6245
6246
String RenderingDevice::get_device_vendor_name() const {
6247
return _get_device_vendor_name(device);
6248
}
6249
6250
String RenderingDevice::get_device_name() const {
6251
return device.name;
6252
}
6253
6254
RenderingDevice::DeviceType RenderingDevice::get_device_type() const {
6255
return DeviceType(device.type);
6256
}
6257
6258
String RenderingDevice::get_device_api_name() const {
6259
return driver->get_api_name();
6260
}
6261
6262
bool RenderingDevice::is_composite_alpha_supported() const {
6263
return driver->is_composite_alpha_supported(main_queue);
6264
}
6265
6266
String RenderingDevice::get_device_api_version() const {
6267
return driver->get_api_version();
6268
}
6269
6270
String RenderingDevice::get_device_pipeline_cache_uuid() const {
6271
return driver->get_pipeline_cache_uuid();
6272
}
6273
6274
void RenderingDevice::swap_buffers(bool p_present) {
6275
ERR_RENDER_THREAD_GUARD();
6276
6277
_end_frame();
6278
_execute_frame(p_present);
6279
6280
// Advance to the next frame and begin recording again.
6281
frame = (frame + 1) % frames.size();
6282
6283
_begin_frame(true);
6284
}
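// Note: with the default frame queue size of 2, the CPU records frame N while the GPU
// may still be executing frame N - 1; _begin_frame() only stalls if the fence of the
// frame being reused has not signaled yet.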
6285
6286
void RenderingDevice::submit() {
6287
ERR_RENDER_THREAD_GUARD();
6288
ERR_FAIL_COND_MSG(is_main_instance, "Only local devices can submit and sync.");
6289
ERR_FAIL_COND_MSG(local_device_processing, "Device already submitted; call sync() to wait until it is done.");
6290
6291
_end_frame();
6292
_execute_frame(false);
6293
local_device_processing = true;
6294
}
6295
6296
void RenderingDevice::sync() {
6297
ERR_RENDER_THREAD_GUARD();
6298
ERR_FAIL_COND_MSG(is_main_instance, "Only local devices can submit and sync.");
6299
ERR_FAIL_COND_MSG(!local_device_processing, "sync() can only be called after a submit().");
6300
6301
_begin_frame(true);
6302
local_device_processing = false;
6303
}
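// A minimal usage sketch (not part of the engine) of the submit/sync workflow on a
// local device; `record_compute_work` is a hypothetical helper that records compute
// lists on the device.
//
//   RenderingDevice *local_rd = RenderingServer::get_singleton()->create_local_rendering_device();
//   record_compute_work(local_rd); // Record compute lists, buffer updates, etc.
//   local_rd->submit();            // Hand the recorded work to the GPU.
//   local_rd->sync();              // Block until the GPU finishes; results can then be read back.
//   memdelete(local_rd);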
6304
6305
void RenderingDevice::_free_pending_resources(int p_frame) {
6306
// Free in dependency usage order, so nothing weird happens.
6307
// Pipelines.
6308
while (frames[p_frame].render_pipelines_to_dispose_of.front()) {
6309
RenderPipeline *pipeline = &frames[p_frame].render_pipelines_to_dispose_of.front()->get();
6310
6311
driver->pipeline_free(pipeline->driver_id);
6312
6313
frames[p_frame].render_pipelines_to_dispose_of.pop_front();
6314
}
6315
6316
while (frames[p_frame].compute_pipelines_to_dispose_of.front()) {
6317
ComputePipeline *pipeline = &frames[p_frame].compute_pipelines_to_dispose_of.front()->get();
6318
6319
driver->pipeline_free(pipeline->driver_id);
6320
6321
frames[p_frame].compute_pipelines_to_dispose_of.pop_front();
6322
}
6323
6324
// Uniform sets.
6325
while (frames[p_frame].uniform_sets_to_dispose_of.front()) {
6326
UniformSet *uniform_set = &frames[p_frame].uniform_sets_to_dispose_of.front()->get();
6327
6328
driver->uniform_set_free(uniform_set->driver_id);
6329
6330
frames[p_frame].uniform_sets_to_dispose_of.pop_front();
6331
}
6332
6333
// Shaders.
6334
while (frames[p_frame].shaders_to_dispose_of.front()) {
6335
Shader *shader = &frames[p_frame].shaders_to_dispose_of.front()->get();
6336
6337
driver->shader_free(shader->driver_id);
6338
6339
frames[p_frame].shaders_to_dispose_of.pop_front();
6340
}
6341
6342
// Samplers.
6343
while (frames[p_frame].samplers_to_dispose_of.front()) {
6344
RDD::SamplerID sampler = frames[p_frame].samplers_to_dispose_of.front()->get();
6345
6346
driver->sampler_free(sampler);
6347
6348
frames[p_frame].samplers_to_dispose_of.pop_front();
6349
}
6350
6351
// Framebuffers.
6352
while (frames[p_frame].framebuffers_to_dispose_of.front()) {
6353
Framebuffer *framebuffer = &frames[p_frame].framebuffers_to_dispose_of.front()->get();
6354
draw_graph.framebuffer_cache_free(driver, framebuffer->framebuffer_cache);
6355
frames[p_frame].framebuffers_to_dispose_of.pop_front();
6356
}
6357
6358
// Textures.
6359
while (frames[p_frame].textures_to_dispose_of.front()) {
6360
Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get();
6361
if (texture->bound) {
6362
WARN_PRINT("Deleted a texture while it was bound.");
6363
}
6364
6365
_texture_free_shared_fallback(texture);
6366
6367
texture_memory -= driver->texture_get_allocation_size(texture->driver_id);
6368
driver->texture_free(texture->driver_id);
6369
6370
frames[p_frame].textures_to_dispose_of.pop_front();
6371
}
6372
6373
// Buffers.
6374
while (frames[p_frame].buffers_to_dispose_of.front()) {
6375
Buffer &buffer = frames[p_frame].buffers_to_dispose_of.front()->get();
6376
driver->buffer_free(buffer.driver_id);
6377
buffer_memory -= buffer.size;
6378
6379
frames[p_frame].buffers_to_dispose_of.pop_front();
6380
}
6381
6382
if (frames_pending_resources_for_processing > 0u) {
6383
--frames_pending_resources_for_processing;
6384
}
6385
}
6386
6387
uint32_t RenderingDevice::get_frame_delay() const {
6388
return frames.size();
6389
}
6390
6391
uint64_t RenderingDevice::get_memory_usage(MemoryType p_type) const {
6392
switch (p_type) {
6393
case MEMORY_BUFFERS: {
6394
return buffer_memory;
6395
}
6396
case MEMORY_TEXTURES: {
6397
return texture_memory;
6398
}
6399
case MEMORY_TOTAL: {
6400
return driver->get_total_memory_used();
6401
}
6402
default: {
6403
DEV_ASSERT(false);
6404
return 0;
6405
}
6406
}
6407
}
6408
6409
void RenderingDevice::_begin_frame(bool p_presented) {
6410
// Before writing to this frame, wait for it to be finished.
6411
_stall_for_frame(frame);
6412
6413
if (command_pool_reset_enabled) {
6414
bool reset = driver->command_pool_reset(frames[frame].command_pool);
6415
ERR_FAIL_COND(!reset);
6416
}
6417
6418
if (p_presented) {
6419
update_perf_report();
6420
driver->linear_uniform_set_pools_reset(frame);
6421
}
6422
6423
// Begin recording on the frame's command buffers.
6424
driver->begin_segment(frame, frames_drawn++);
6425
driver->command_buffer_begin(frames[frame].command_buffer);
6426
6427
// Reset the graph.
6428
draw_graph.begin();
6429
6430
// Erase pending resources.
6431
_free_pending_resources(frame);
6432
6433
// Advance staging buffers if used.
6434
if (upload_staging_buffers.used) {
6435
upload_staging_buffers.current = (upload_staging_buffers.current + 1) % upload_staging_buffers.blocks.size();
6436
upload_staging_buffers.used = false;
6437
}
6438
6439
if (download_staging_buffers.used) {
6440
download_staging_buffers.current = (download_staging_buffers.current + 1) % download_staging_buffers.blocks.size();
6441
download_staging_buffers.used = false;
6442
}
6443
6444
if (frames[frame].timestamp_count) {
6445
driver->timestamp_query_pool_get_results(frames[frame].timestamp_pool, frames[frame].timestamp_count, frames[frame].timestamp_result_values.ptr());
6446
driver->command_timestamp_query_pool_reset(frames[frame].command_buffer, frames[frame].timestamp_pool, frames[frame].timestamp_count);
6447
SWAP(frames[frame].timestamp_names, frames[frame].timestamp_result_names);
6448
SWAP(frames[frame].timestamp_cpu_values, frames[frame].timestamp_cpu_result_values);
6449
}
6450
6451
frames[frame].timestamp_result_count = frames[frame].timestamp_count;
6452
frames[frame].timestamp_count = 0;
6453
frames[frame].index = Engine::get_singleton()->get_frames_drawn();
6454
}
6455
6456
void RenderingDevice::_end_frame() {
6457
if (draw_list.active) {
6458
ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
6459
}
6460
6461
if (compute_list.active) {
6462
ERR_PRINT("Found open compute list at the end of the frame, this should never happen (further compute will likely not work).");
6463
}
6464
6465
// The command buffer must be copied into a stack variable as the driver workarounds can change the command buffer in use.
6466
RDD::CommandBufferID command_buffer = frames[frame].command_buffer;
6467
_submit_transfer_workers(command_buffer);
6468
_submit_transfer_barriers(command_buffer);
6469
6470
draw_graph.end(RENDER_GRAPH_REORDER, RENDER_GRAPH_FULL_BARRIERS, command_buffer, frames[frame].command_buffer_pool);
6471
driver->command_buffer_end(command_buffer);
6472
driver->end_segment();
6473
}
6474
6475
void RenderingDevice::execute_chained_cmds(bool p_present_swap_chain, RenderingDeviceDriver::FenceID p_draw_fence,
6476
RenderingDeviceDriver::SemaphoreID p_dst_draw_semaphore_to_signal) {
6477
// Execute command buffers and use semaphores to wait on the execution of the previous one.
6478
// Normally there's only one command buffer, but driver workarounds can force situations where
6479
// there'll be more.
6480
uint32_t command_buffer_count = 1;
6481
RDG::CommandBufferPool &buffer_pool = frames[frame].command_buffer_pool;
6482
if (buffer_pool.buffers_used > 0) {
6483
command_buffer_count += buffer_pool.buffers_used;
6484
buffer_pool.buffers_used = 0;
6485
}
6486
6487
thread_local LocalVector<RDD::SwapChainID> swap_chains;
6488
swap_chains.clear();
6489
6490
// Instead of having just one command buffer, we potentially have many (split due to an
6491
// Adreno workaround on mobile, which only applies when the workaround is active). Thus we must execute
6492
// all of them and chain them together via semaphores as dependent executions.
6493
thread_local LocalVector<RDD::SemaphoreID> wait_semaphores;
6494
wait_semaphores = frames[frame].semaphores_to_wait_on;
6495
6496
for (uint32_t i = 0; i < command_buffer_count; i++) {
6497
RDD::CommandBufferID command_buffer;
6498
RDD::SemaphoreID signal_semaphore;
6499
RDD::FenceID signal_fence;
6500
if (i > 0) {
6501
command_buffer = buffer_pool.buffers[i - 1];
6502
} else {
6503
command_buffer = frames[frame].command_buffer;
6504
}
6505
6506
if (i == (command_buffer_count - 1)) {
6507
// This is the last command buffer, it should signal the semaphore & fence.
6508
signal_semaphore = p_dst_draw_semaphore_to_signal;
6509
signal_fence = p_draw_fence;
6510
6511
if (p_present_swap_chain) {
6512
// Just present the swap chains as part of the last command execution.
6513
swap_chains = frames[frame].swap_chains_to_present;
6514
}
6515
} else {
6516
signal_semaphore = buffer_pool.semaphores[i];
6517
// A semaphore always needs to be signaled if this is not the last command buffer.
6518
}
6519
6520
driver->command_queue_execute_and_present(main_queue, wait_semaphores, command_buffer,
6521
signal_semaphore ? signal_semaphore : VectorView<RDD::SemaphoreID>(), signal_fence,
6522
swap_chains);
6523
6524
// Make the next command buffer wait on the semaphore signaled by this one.
6525
wait_semaphores.resize(1);
6526
wait_semaphores[0] = signal_semaphore;
6527
}
6528
6529
frames[frame].semaphores_to_wait_on.clear();
6530
}
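// The resulting submission chain, sketched for three command buffers (CB0..CB2):
//
//   wait(frame semaphores) -> CB0 -> signal(semaphores[0])
//   wait(semaphores[0])    -> CB1 -> signal(semaphores[1])
//   wait(semaphores[1])    -> CB2 -> signal(p_dst_draw_semaphore_to_signal, p_draw_fence) [+ present]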
6531
6532
void RenderingDevice::_execute_frame(bool p_present) {
6533
// Check whether this frame should present the swap chains and in which queue.
6534
const bool frame_can_present = p_present && !frames[frame].swap_chains_to_present.is_empty();
6535
const bool separate_present_queue = main_queue != present_queue;
6536
6537
// The semaphore is required if the frame can be presented and a separate present queue is used,
6538
// since the separate present queue will wait on that semaphore before presenting.
6539
const RDD::SemaphoreID semaphore = (frame_can_present && separate_present_queue)
6540
? frames[frame].semaphore
6541
: RDD::SemaphoreID(nullptr);
6542
const bool present_swap_chain = frame_can_present && !separate_present_queue;
6543
6544
execute_chained_cmds(present_swap_chain, frames[frame].fence, semaphore);
6545
// Indicate the fence has been signaled so the next time the frame's contents need to be
6546
// used, the CPU needs to wait on the work to be completed.
6547
frames[frame].fence_signaled = true;
6548
6549
if (frame_can_present) {
6550
if (separate_present_queue) {
6551
// Issue the presentation separately if the presentation queue is different from the main queue.
6552
driver->command_queue_execute_and_present(present_queue, frames[frame].semaphore, {}, {}, {}, frames[frame].swap_chains_to_present);
6553
}
6554
6555
frames[frame].swap_chains_to_present.clear();
6556
}
6557
}
6558
6559
void RenderingDevice::_stall_for_frame(uint32_t p_frame) {
6560
thread_local PackedByteArray packed_byte_array;
6561
6562
if (frames[p_frame].fence_signaled) {
6563
driver->fence_wait(frames[p_frame].fence);
6564
frames[p_frame].fence_signaled = false;
6565
6566
// Flush any pending requests for asynchronous buffer downloads.
6567
if (!frames[p_frame].download_buffer_get_data_requests.is_empty()) {
6568
for (uint32_t i = 0; i < frames[p_frame].download_buffer_get_data_requests.size(); i++) {
6569
const BufferGetDataRequest &request = frames[p_frame].download_buffer_get_data_requests[i];
6570
packed_byte_array.resize(request.size);
6571
6572
uint32_t array_offset = 0;
6573
for (uint32_t j = 0; j < request.frame_local_count; j++) {
6574
uint32_t local_index = request.frame_local_index + j;
6575
const RDD::BufferCopyRegion &region = frames[p_frame].download_buffer_copy_regions[local_index];
6576
uint8_t *buffer_data = driver->buffer_map(frames[p_frame].download_buffer_staging_buffers[local_index]);
6577
memcpy(&packed_byte_array.write[array_offset], &buffer_data[region.dst_offset], region.size);
6578
driver->buffer_unmap(frames[p_frame].download_buffer_staging_buffers[local_index]);
6579
array_offset += region.size;
6580
}
6581
6582
request.callback.call(packed_byte_array);
6583
}
6584
6585
frames[p_frame].download_buffer_staging_buffers.clear();
6586
frames[p_frame].download_buffer_copy_regions.clear();
6587
frames[p_frame].download_buffer_get_data_requests.clear();
6588
}
6589
6590
// Flush any pending requests for asynchronous texture downloads.
6591
if (!frames[p_frame].download_texture_get_data_requests.is_empty()) {
6592
uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
6593
for (uint32_t i = 0; i < frames[p_frame].download_texture_get_data_requests.size(); i++) {
6594
const TextureGetDataRequest &request = frames[p_frame].download_texture_get_data_requests[i];
6595
uint32_t texture_size = get_image_format_required_size(request.format, request.width, request.height, request.depth, request.mipmaps);
6596
packed_byte_array.resize(texture_size);
6597
6598
// Find the block size of the texture's format.
6599
uint32_t block_w = 0;
6600
uint32_t block_h = 0;
6601
get_compressed_image_format_block_dimensions(request.format, block_w, block_h);
6602
6603
uint32_t block_size = get_compressed_image_format_block_byte_size(request.format);
6604
uint32_t pixel_size = get_image_format_pixel_size(request.format);
6605
uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(request.format);
6606
uint32_t region_size = texture_download_region_size_px;
6607
6608
for (uint32_t j = 0; j < request.frame_local_count; j++) {
6609
uint32_t local_index = request.frame_local_index + j;
6610
const RDD::BufferTextureCopyRegion &region = frames[p_frame].download_buffer_texture_copy_regions[local_index];
6611
uint32_t w = STEPIFY(request.width >> region.texture_subresources.mipmap, block_w);
6612
uint32_t h = STEPIFY(request.height >> region.texture_subresources.mipmap, block_h);
6613
uint32_t region_w = MIN(region_size, w - region.texture_offset.x);
6614
uint32_t region_h = MIN(region_size, h - region.texture_offset.y);
6615
uint32_t region_pitch = (region_w * pixel_size * block_w) >> pixel_rshift;
6616
region_pitch = STEPIFY(region_pitch, pitch_step);
6617
6618
uint8_t *buffer_data = driver->buffer_map(frames[p_frame].download_texture_staging_buffers[local_index]);
6619
const uint8_t *read_ptr = buffer_data + region.buffer_offset;
6620
uint8_t *write_ptr = packed_byte_array.ptrw() + frames[p_frame].download_texture_mipmap_offsets[local_index];
6621
uint32_t unit_size = pixel_size;
6622
if (block_w != 1 || block_h != 1) {
6623
unit_size = block_size;
6624
}
6625
6626
write_ptr += ((region.texture_offset.y / block_h) * (w / block_w) + (region.texture_offset.x / block_w)) * unit_size;
6627
for (uint32_t y = region_h / block_h; y > 0; y--) {
6628
memcpy(write_ptr, read_ptr, (region_w / block_w) * unit_size);
6629
write_ptr += (w / block_w) * unit_size;
6630
read_ptr += region_pitch;
6631
}
6632
6633
driver->buffer_unmap(frames[p_frame].download_texture_staging_buffers[local_index]);
6634
}
6635
6636
request.callback.call(packed_byte_array);
6637
}
6638
6639
frames[p_frame].download_texture_staging_buffers.clear();
6640
frames[p_frame].download_buffer_texture_copy_regions.clear();
6641
frames[p_frame].download_texture_mipmap_offsets.clear();
6642
frames[p_frame].download_texture_get_data_requests.clear();
6643
}
6644
}
6645
}
6646
6647
void RenderingDevice::_stall_for_previous_frames() {
6648
for (uint32_t i = 0; i < frames.size(); i++) {
6649
_stall_for_frame(i);
6650
}
6651
}
6652
6653
void RenderingDevice::_flush_and_stall_for_all_frames() {
6654
_stall_for_previous_frames();
6655
_end_frame();
6656
_execute_frame(false);
6657
_begin_frame();
6658
}
6659
6660
Error RenderingDevice::initialize(RenderingContextDriver *p_context, DisplayServer::WindowID p_main_window) {
6661
ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);
6662
6663
Error err;
6664
RenderingContextDriver::SurfaceID main_surface = 0;
6665
is_main_instance = (singleton == this) && (p_main_window != DisplayServer::INVALID_WINDOW_ID);
6666
if (p_main_window != DisplayServer::INVALID_WINDOW_ID) {
6667
// Retrieve the surface from the main window if it was specified.
6668
main_surface = p_context->surface_get_from_window(p_main_window);
6669
ERR_FAIL_COND_V(main_surface == 0, FAILED);
6670
}
6671
6672
context = p_context;
6673
driver = context->driver_create();
6674
6675
print_verbose("Devices:");
6676
int32_t device_index = Engine::get_singleton()->get_gpu_index();
6677
const uint32_t device_count = context->device_get_count();
6678
const bool detect_device = (device_index < 0) || (device_index >= int32_t(device_count));
6679
uint32_t device_type_score = 0;
6680
for (uint32_t i = 0; i < device_count; i++) {
6681
RenderingContextDriver::Device device_option = context->device_get(i);
6682
String name = device_option.name;
6683
String vendor = _get_device_vendor_name(device_option);
6684
String type = _get_device_type_name(device_option);
6685
bool present_supported = main_surface != 0 ? context->device_supports_present(i, main_surface) : false;
6686
print_verbose(" #" + itos(i) + ": " + vendor + " " + name + " - " + (present_supported ? "Supported" : "Unsupported") + ", " + type);
6687
if (detect_device && (present_supported || main_surface == 0)) {
6688
// If a window was specified, present must be supported by the device to be available as an option.
6689
// Assign a score for each type of device and prefer the device with the higher score.
6690
uint32_t option_score = _get_device_type_score(device_option);
6691
if (option_score > device_type_score) {
6692
device_index = i;
6693
device_type_score = option_score;
6694
}
6695
}
6696
}
6697
6698
ERR_FAIL_COND_V_MSG((device_index < 0) || (device_index >= int32_t(device_count)), ERR_CANT_CREATE, "None of the devices supports both graphics and present queues.");
6699
6700
uint32_t frame_count = 1;
6701
if (main_surface != 0) {
6702
frame_count = MAX(2U, uint32_t(GLOBAL_GET("rendering/rendering_device/vsync/frame_queue_size")));
6703
}
6704
6705
frame = 0;
6706
max_timestamp_query_elements = GLOBAL_GET("debug/settings/profiler/max_timestamp_query_elements");
6707
6708
device = context->device_get(device_index);
6709
err = driver->initialize(device_index, frame_count);
6710
ERR_FAIL_COND_V_MSG(err != OK, FAILED, "Failed to initialize driver for device.");
6711
6712
if (is_main_instance) {
6713
// Only the singleton instance with a display should print this information.
6714
String rendering_method;
6715
if (OS::get_singleton()->get_current_rendering_method() == "mobile") {
6716
rendering_method = "Forward Mobile";
6717
} else {
6718
rendering_method = "Forward+";
6719
}
6720
6721
// Output our device version.
6722
Engine::get_singleton()->print_header(vformat("%s %s - %s - Using Device #%d: %s - %s", get_device_api_name(), get_device_api_version(), rendering_method, device_index, _get_device_vendor_name(device), device.name));
6723
}
6724
6725
// Pick the main queue family. Note that we explicitly do not request the transfer bit: the specification
6726
// defines that the presence of either the graphics or compute bit implies the queue can also perform transfer
6727
// operations, and in that case it is optional for the queue to advertise the dedicated transfer bit.
6728
BitField<RDD::CommandQueueFamilyBits> main_queue_bits = {};
6729
main_queue_bits.set_flag(RDD::COMMAND_QUEUE_FAMILY_GRAPHICS_BIT);
6730
main_queue_bits.set_flag(RDD::COMMAND_QUEUE_FAMILY_COMPUTE_BIT);
6731
6732
#if !FORCE_SEPARATE_PRESENT_QUEUE
6733
// Needing a separate queue for presentation is an edge case; it remains to be seen which hardware triggers it at all.
6734
main_queue_family = driver->command_queue_family_get(main_queue_bits, main_surface);
6735
if (!main_queue_family && (main_surface != 0))
6736
#endif
6737
{
6738
// If it was not possible to find a main queue that supports the surface, we attempt to get two different queues instead.
6739
main_queue_family = driver->command_queue_family_get(main_queue_bits);
6740
present_queue_family = driver->command_queue_family_get(BitField<RDD::CommandQueueFamilyBits>(), main_surface);
6741
ERR_FAIL_COND_V(!present_queue_family, FAILED);
6742
}
6743
6744
ERR_FAIL_COND_V(!main_queue_family, FAILED);
6745
6746
// Create the main queue.
6747
main_queue = driver->command_queue_create(main_queue_family, true);
6748
ERR_FAIL_COND_V(!main_queue, FAILED);
6749
6750
transfer_queue_family = driver->command_queue_family_get(RDD::COMMAND_QUEUE_FAMILY_TRANSFER_BIT);
6751
if (transfer_queue_family) {
6752
// Create the transfer queue.
6753
transfer_queue = driver->command_queue_create(transfer_queue_family);
6754
ERR_FAIL_COND_V(!transfer_queue, FAILED);
6755
} else {
6756
// Use main queue as the transfer queue.
6757
transfer_queue = main_queue;
6758
transfer_queue_family = main_queue_family;
6759
}
6760
6761
if (present_queue_family) {
6762
// Create the present queue.
6763
present_queue = driver->command_queue_create(present_queue_family);
6764
ERR_FAIL_COND_V(!present_queue, FAILED);
6765
} else {
6766
// Use main queue as the present queue.
6767
present_queue = main_queue;
6768
present_queue_family = main_queue_family;
6769
}
6770
6771
// Use the processor count as the maximum number of transfer workers that can be created.
6772
transfer_worker_pool_max_size = OS::get_singleton()->get_processor_count();
6773
6774
frames.resize(frame_count);
6775
6776
// Create data for all the frames.
6777
for (uint32_t i = 0; i < frames.size(); i++) {
6778
frames[i].index = 0;
6779
6780
// Create command pool, command buffers, semaphores and fences.
6781
frames[i].command_pool = driver->command_pool_create(main_queue_family, RDD::COMMAND_BUFFER_TYPE_PRIMARY);
6782
ERR_FAIL_COND_V(!frames[i].command_pool, FAILED);
6783
frames[i].command_buffer = driver->command_buffer_create(frames[i].command_pool);
6784
ERR_FAIL_COND_V(!frames[i].command_buffer, FAILED);
6785
frames[i].semaphore = driver->semaphore_create();
6786
ERR_FAIL_COND_V(!frames[i].semaphore, FAILED);
6787
frames[i].fence = driver->fence_create();
6788
ERR_FAIL_COND_V(!frames[i].fence, FAILED);
6789
frames[i].fence_signaled = false;
6790
6791
// Create query pool.
6792
frames[i].timestamp_pool = driver->timestamp_query_pool_create(max_timestamp_query_elements);
6793
frames[i].timestamp_names.resize(max_timestamp_query_elements);
6794
frames[i].timestamp_cpu_values.resize(max_timestamp_query_elements);
6795
frames[i].timestamp_count = 0;
6796
frames[i].timestamp_result_names.resize(max_timestamp_query_elements);
6797
frames[i].timestamp_cpu_result_values.resize(max_timestamp_query_elements);
6798
frames[i].timestamp_result_values.resize(max_timestamp_query_elements);
6799
frames[i].timestamp_result_count = 0;
6800
6801
// Assign the main queue family and command pool to the command buffer pool.
6802
frames[i].command_buffer_pool.pool = frames[i].command_pool;
6803
6804
// Create the semaphores for the transfer workers.
6805
frames[i].transfer_worker_semaphores.resize(transfer_worker_pool_max_size);
6806
for (uint32_t j = 0; j < transfer_worker_pool_max_size; j++) {
6807
frames[i].transfer_worker_semaphores[j] = driver->semaphore_create();
6808
ERR_FAIL_COND_V(!frames[i].transfer_worker_semaphores[j], FAILED);
6809
}
6810
}
6811
6812
// Start from frame count, so everything else is immediately old.
6813
frames_drawn = frames.size();
6814
6815
// Initialize recording on the first frame.
6816
driver->begin_segment(frame, frames_drawn++);
6817
driver->command_buffer_begin(frames[0].command_buffer);
6818
6819
// Create draw graph and start it initialized as well.
6820
draw_graph.initialize(driver, device, &_render_pass_create_from_graph, frames.size(), main_queue_family, SECONDARY_COMMAND_BUFFERS_PER_FRAME);
6821
draw_graph.begin();
6822
6823
for (uint32_t i = 0; i < frames.size(); i++) {
6824
// Reset all queries in a query pool before doing any operations with them.
6825
driver->command_timestamp_query_pool_reset(frames[0].command_buffer, frames[i].timestamp_pool, max_timestamp_query_elements);
6826
}
6827
6828
// Convert block size from KB.
6829
upload_staging_buffers.block_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/block_size_kb");
6830
upload_staging_buffers.block_size = MAX(4u, upload_staging_buffers.block_size);
6831
upload_staging_buffers.block_size *= 1024;
6832
6833
// Convert staging buffer size from MB.
6834
upload_staging_buffers.max_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/max_size_mb");
6835
upload_staging_buffers.max_size = MAX(1u, upload_staging_buffers.max_size);
6836
upload_staging_buffers.max_size *= 1024 * 1024;
6837
upload_staging_buffers.max_size = MAX(upload_staging_buffers.max_size, upload_staging_buffers.block_size * 4);
6838
6839
// Copy the sizes to the download staging buffers.
6840
download_staging_buffers.block_size = upload_staging_buffers.block_size;
6841
download_staging_buffers.max_size = upload_staging_buffers.max_size;
6842
6843
texture_upload_region_size_px = GLOBAL_GET("rendering/rendering_device/staging_buffer/texture_upload_region_size_px");
6844
texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px);
6845
6846
texture_download_region_size_px = GLOBAL_GET("rendering/rendering_device/staging_buffer/texture_download_region_size_px");
6847
texture_download_region_size_px = nearest_power_of_2_templated(texture_download_region_size_px);
6848
6849
// Ensure current staging block is valid and at least one per frame exists.
6850
upload_staging_buffers.current = 0;
6851
upload_staging_buffers.used = false;
6852
upload_staging_buffers.usage_bits = RDD::BUFFER_USAGE_TRANSFER_FROM_BIT;
6853
6854
download_staging_buffers.current = 0;
6855
download_staging_buffers.used = false;
6856
download_staging_buffers.usage_bits = RDD::BUFFER_USAGE_TRANSFER_TO_BIT;
6857
6858
for (uint32_t i = 0; i < frames.size(); i++) {
6859
// Staging was never used; create one upload and one download block per frame.
6860
err = _insert_staging_block(upload_staging_buffers);
6861
ERR_FAIL_COND_V(err, FAILED);
6862
6863
err = _insert_staging_block(download_staging_buffers);
6864
ERR_FAIL_COND_V(err, FAILED);
6865
}
6866
6867
draw_list = DrawList();
6868
compute_list = ComputeList();
6869
6870
bool project_pipeline_cache_enable = GLOBAL_GET("rendering/rendering_device/pipeline_cache/enable");
6871
if (is_main_instance && project_pipeline_cache_enable) {
6872
// Only the instance that is not a local device and is also the singleton is allowed to manage a pipeline cache.
6873
pipeline_cache_file_path = vformat("user://vulkan/pipelines.%s.%s",
6874
OS::get_singleton()->get_current_rendering_method(),
6875
device.name.validate_filename().replace_char(' ', '_').to_lower());
6876
if (Engine::get_singleton()->is_editor_hint()) {
6877
pipeline_cache_file_path += ".editor";
6878
}
6879
pipeline_cache_file_path += ".cache";
6880
6881
Vector<uint8_t> cache_data = _load_pipeline_cache();
6882
pipeline_cache_enabled = driver->pipeline_cache_create(cache_data);
6883
if (pipeline_cache_enabled) {
6884
pipeline_cache_size = driver->pipeline_cache_query_size();
6885
print_verbose(vformat("Startup PSO cache (%.1f MiB)", pipeline_cache_size / (1024.0f * 1024.0f)));
6886
}
6887
}
6888
6889
// Find the best method available for VRS on the current hardware.
6890
_vrs_detect_method();
6891
6892
return OK;
6893
}
6894
6895
Vector<uint8_t> RenderingDevice::_load_pipeline_cache() {
6896
DirAccess::make_dir_recursive_absolute(pipeline_cache_file_path.get_base_dir());
6897
6898
if (FileAccess::exists(pipeline_cache_file_path)) {
6899
Error file_error;
6900
Vector<uint8_t> file_data = FileAccess::get_file_as_bytes(pipeline_cache_file_path, &file_error);
6901
return file_data;
6902
} else {
6903
return Vector<uint8_t>();
6904
}
6905
}
6906
6907
void RenderingDevice::_update_pipeline_cache(bool p_closing) {
6908
_THREAD_SAFE_METHOD_
6909
6910
{
6911
bool still_saving = pipeline_cache_save_task != WorkerThreadPool::INVALID_TASK_ID && !WorkerThreadPool::get_singleton()->is_task_completed(pipeline_cache_save_task);
6912
if (still_saving) {
6913
if (p_closing) {
6914
WorkerThreadPool::get_singleton()->wait_for_task_completion(pipeline_cache_save_task);
6915
pipeline_cache_save_task = WorkerThreadPool::INVALID_TASK_ID;
6916
} else {
6917
// We can't save until the currently running save is done. We'll retry next time; worst case, we'll save when exiting.
6918
return;
6919
}
6920
}
6921
}
6922
6923
{
6924
size_t new_pipelines_cache_size = driver->pipeline_cache_query_size();
6925
ERR_FAIL_COND(!new_pipelines_cache_size);
6926
size_t difference = new_pipelines_cache_size - pipeline_cache_size;
6927
6928
bool must_save = false;
6929
6930
if (p_closing) {
6931
must_save = difference > 0;
6932
} else {
6933
float save_interval = GLOBAL_GET("rendering/rendering_device/pipeline_cache/save_chunk_size_mb");
6934
must_save = difference > 0 && difference / (1024.0f * 1024.0f) >= save_interval;
6935
}
6936
6937
if (must_save) {
6938
pipeline_cache_size = new_pipelines_cache_size;
6939
} else {
6940
return;
6941
}
6942
}
6943
6944
if (p_closing) {
6945
_save_pipeline_cache(this);
6946
} else {
6947
pipeline_cache_save_task = WorkerThreadPool::get_singleton()->add_native_task(&_save_pipeline_cache, this, false, "PipelineCacheSave");
6948
}
6949
}
6950
6951
void RenderingDevice::_save_pipeline_cache(void *p_data) {
6952
RenderingDevice *self = static_cast<RenderingDevice *>(p_data);
6953
6954
self->_thread_safe_.lock();
6955
Vector<uint8_t> cache_blob = self->driver->pipeline_cache_serialize();
6956
self->_thread_safe_.unlock();
6957
6958
if (cache_blob.is_empty()) {
6959
return;
6960
}
6961
print_verbose(vformat("Updated PSO cache (%.1f MiB)", cache_blob.size() / (1024.0f * 1024.0f)));
6962
6963
Ref<FileAccess> f = FileAccess::open(self->pipeline_cache_file_path, FileAccess::WRITE, nullptr);
6964
if (f.is_valid()) {
6965
f->store_buffer(cache_blob);
6966
}
6967
}
6968
6969
template <typename T>
6970
void RenderingDevice::_free_rids(T &p_owner, const char *p_type) {
6971
LocalVector<RID> owned = p_owner.get_owned_list();
6972
if (owned.size()) {
6973
if (owned.size() == 1) {
6974
WARN_PRINT(vformat("1 RID of type \"%s\" was leaked.", p_type));
6975
} else {
6976
WARN_PRINT(vformat("%d RIDs of type \"%s\" were leaked.", owned.size(), p_type));
6977
}
6978
for (const RID &rid : owned) {
6979
#ifdef DEV_ENABLED
6980
if (resource_names.has(rid)) {
6981
print_line(String(" - ") + resource_names[rid]);
6982
}
6983
#endif
6984
free_rid(rid);
6985
}
6986
}
6987
}
6988
6989
void RenderingDevice::capture_timestamp(const String &p_name) {
6990
ERR_RENDER_THREAD_GUARD();
6991
6992
ERR_FAIL_COND_MSG(draw_list.active && draw_list.state.draw_count > 0, "Capturing timestamps during draw list creation is not allowed. Offending timestamp was: " + p_name);
6993
ERR_FAIL_COND_MSG(compute_list.active && compute_list.state.dispatch_count > 0, "Capturing timestamps during compute list creation is not allowed. Offending timestamp was: " + p_name);
6994
ERR_FAIL_COND_MSG(frames[frame].timestamp_count >= max_timestamp_query_elements, vformat("Tried capturing more timestamps than the configured maximum (%d). You can increase this limit in the project settings under debug/settings/profiler/max_timestamp_query_elements.", max_timestamp_query_elements));
6995
6996
draw_graph.add_capture_timestamp(frames[frame].timestamp_pool, frames[frame].timestamp_count);
6997
6998
frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name;
6999
frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec();
7000
frames[frame].timestamp_count++;
7001
}
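// A minimal profiling sketch (not part of the engine), assuming `rd` is a valid
// RenderingDevice pointer. Results only become available once the frame has fully
// executed on the GPU (after roughly get_frame_delay() frames):
//
//   rd->capture_timestamp("Before Lighting");
//   // ... record work ...
//   rd->capture_timestamp("After Lighting");
//
//   // Some frames later:
//   for (uint32_t i = 0; i < rd->get_captured_timestamps_count(); i++) {
//       print_line(rd->get_captured_timestamp_name(i) + ": " + itos(rd->get_captured_timestamp_gpu_time(i)));
//   }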
7002
7003
uint64_t RenderingDevice::get_driver_resource(DriverResource p_resource, RID p_rid, uint64_t p_index) {
7004
ERR_RENDER_THREAD_GUARD_V(0);
7005
7006
uint64_t driver_id = 0;
7007
switch (p_resource) {
7008
case DRIVER_RESOURCE_LOGICAL_DEVICE:
7009
case DRIVER_RESOURCE_PHYSICAL_DEVICE:
7010
case DRIVER_RESOURCE_TOPMOST_OBJECT:
7011
break;
7012
case DRIVER_RESOURCE_COMMAND_QUEUE:
7013
driver_id = main_queue.id;
7014
break;
7015
case DRIVER_RESOURCE_QUEUE_FAMILY:
7016
driver_id = main_queue_family.id;
7017
break;
7018
case DRIVER_RESOURCE_TEXTURE:
7019
case DRIVER_RESOURCE_TEXTURE_VIEW:
7020
case DRIVER_RESOURCE_TEXTURE_DATA_FORMAT: {
7021
Texture *tex = texture_owner.get_or_null(p_rid);
7022
ERR_FAIL_NULL_V(tex, 0);
7023
7024
driver_id = tex->driver_id.id;
7025
} break;
7026
case DRIVER_RESOURCE_SAMPLER: {
7027
RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(p_rid);
7028
ERR_FAIL_NULL_V(sampler_driver_id, 0);
7029
7030
driver_id = (*sampler_driver_id).id;
7031
} break;
7032
case DRIVER_RESOURCE_UNIFORM_SET: {
7033
UniformSet *uniform_set = uniform_set_owner.get_or_null(p_rid);
7034
ERR_FAIL_NULL_V(uniform_set, 0);
7035
7036
driver_id = uniform_set->driver_id.id;
7037
} break;
7038
case DRIVER_RESOURCE_BUFFER: {
7039
Buffer *buffer = nullptr;
7040
if (vertex_buffer_owner.owns(p_rid)) {
7041
buffer = vertex_buffer_owner.get_or_null(p_rid);
7042
} else if (index_buffer_owner.owns(p_rid)) {
7043
buffer = index_buffer_owner.get_or_null(p_rid);
7044
} else if (uniform_buffer_owner.owns(p_rid)) {
7045
buffer = uniform_buffer_owner.get_or_null(p_rid);
7046
} else if (texture_buffer_owner.owns(p_rid)) {
7047
buffer = texture_buffer_owner.get_or_null(p_rid);
7048
} else if (storage_buffer_owner.owns(p_rid)) {
7049
buffer = storage_buffer_owner.get_or_null(p_rid);
7050
}
7051
ERR_FAIL_NULL_V(buffer, 0);
7052
7053
driver_id = buffer->driver_id.id;
7054
} break;
7055
case DRIVER_RESOURCE_COMPUTE_PIPELINE: {
7056
ComputePipeline *compute_pipeline = compute_pipeline_owner.get_or_null(p_rid);
7057
ERR_FAIL_NULL_V(compute_pipeline, 0);
7058
7059
driver_id = compute_pipeline->driver_id.id;
7060
} break;
7061
case DRIVER_RESOURCE_RENDER_PIPELINE: {
7062
RenderPipeline *render_pipeline = render_pipeline_owner.get_or_null(p_rid);
7063
ERR_FAIL_NULL_V(render_pipeline, 0);
7064
7065
driver_id = render_pipeline->driver_id.id;
7066
} break;
7067
default: {
7068
ERR_FAIL_V(0);
7069
} break;
7070
}
7071
7072
return driver->get_resource_native_handle(p_resource, driver_id);
7073
}
7074
7075
String RenderingDevice::get_driver_and_device_memory_report() const {
7076
return context->get_driver_and_device_memory_report();
7077
}
7078
7079
String RenderingDevice::get_tracked_object_name(uint32_t p_type_index) const {
7080
return context->get_tracked_object_name(p_type_index);
7081
}
7082
7083
uint64_t RenderingDevice::get_tracked_object_type_count() const {
7084
return context->get_tracked_object_type_count();
7085
}
7086
7087
uint64_t RenderingDevice::get_driver_total_memory() const {
7088
return context->get_driver_total_memory();
7089
}
7090
7091
uint64_t RenderingDevice::get_driver_allocation_count() const {
7092
return context->get_driver_allocation_count();
7093
}
7094
7095
uint64_t RenderingDevice::get_driver_memory_by_object_type(uint32_t p_type) const {
7096
return context->get_driver_memory_by_object_type(p_type);
7097
}
7098
7099
uint64_t RenderingDevice::get_driver_allocs_by_object_type(uint32_t p_type) const {
7100
return context->get_driver_allocs_by_object_type(p_type);
7101
}
7102
7103
uint64_t RenderingDevice::get_device_total_memory() const {
7104
return context->get_device_total_memory();
7105
}
7106
7107
uint64_t RenderingDevice::get_device_allocation_count() const {
7108
return context->get_device_allocation_count();
7109
}
7110
7111
uint64_t RenderingDevice::get_device_memory_by_object_type(uint32_t type) const {
7112
return context->get_device_memory_by_object_type(type);
7113
}
7114
7115
uint64_t RenderingDevice::get_device_allocs_by_object_type(uint32_t type) const {
7116
return context->get_device_allocs_by_object_type(type);
7117
}
7118
7119
uint32_t RenderingDevice::get_captured_timestamps_count() const {
7120
ERR_RENDER_THREAD_GUARD_V(0);
7121
return frames[frame].timestamp_result_count;
7122
}
7123
7124
uint64_t RenderingDevice::get_captured_timestamps_frame() const {
7125
ERR_RENDER_THREAD_GUARD_V(0);
7126
return frames[frame].index;
7127
}
7128
7129
uint64_t RenderingDevice::get_captured_timestamp_gpu_time(uint32_t p_index) const {
7130
ERR_RENDER_THREAD_GUARD_V(0);
7131
ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
7132
return driver->timestamp_query_result_to_time(frames[frame].timestamp_result_values[p_index]);
7133
}
7134
7135
uint64_t RenderingDevice::get_captured_timestamp_cpu_time(uint32_t p_index) const {
7136
ERR_RENDER_THREAD_GUARD_V(0);
7137
ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
7138
return frames[frame].timestamp_cpu_result_values[p_index];
7139
}
7140
7141
String RenderingDevice::get_captured_timestamp_name(uint32_t p_index) const {
7142
ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, String());
7143
return frames[frame].timestamp_result_names[p_index];
7144
}
7145
7146
uint64_t RenderingDevice::limit_get(Limit p_limit) const {
7147
return driver->limit_get(p_limit);
7148
}
7149
7150
void RenderingDevice::finalize() {
7151
ERR_RENDER_THREAD_GUARD();
7152
7153
if (!frames.is_empty()) {
7154
// Wait for all frames to have finished rendering.
7155
_flush_and_stall_for_all_frames();
7156
}
7157
7158
// Wait for transfer workers to finish.
7159
_submit_transfer_workers();
7160
_wait_for_transfer_workers();
7161
7162
// Delete everything the graph has created.
7163
draw_graph.finalize();
7164
7165
// Free all resources.
7166
_free_rids(render_pipeline_owner, "Pipeline");
7167
_free_rids(compute_pipeline_owner, "Compute");
7168
_free_rids(uniform_set_owner, "UniformSet");
7169
_free_rids(texture_buffer_owner, "TextureBuffer");
7170
_free_rids(storage_buffer_owner, "StorageBuffer");
7171
_free_rids(uniform_buffer_owner, "UniformBuffer");
7172
_free_rids(shader_owner, "Shader");
7173
_free_rids(index_array_owner, "IndexArray");
7174
_free_rids(index_buffer_owner, "IndexBuffer");
7175
_free_rids(vertex_array_owner, "VertexArray");
7176
_free_rids(vertex_buffer_owner, "VertexBuffer");
7177
_free_rids(framebuffer_owner, "Framebuffer");
7178
_free_rids(sampler_owner, "Sampler");
7179
{
7180
// For textures it's a bit more difficult because they may be shared.
7181
LocalVector<RID> owned = texture_owner.get_owned_list();
7182
if (owned.size()) {
7183
if (owned.size() == 1) {
7184
WARN_PRINT("1 RID of type \"Texture\" was leaked.");
7185
} else {
7186
WARN_PRINT(vformat("%d RIDs of type \"Texture\" were leaked.", owned.size()));
7187
}
7188
LocalVector<RID> owned_non_shared;
7189
// Free shared first.
7190
for (const RID &texture_rid : owned) {
7191
if (texture_is_shared(texture_rid)) {
7192
#ifdef DEV_ENABLED
7193
if (resource_names.has(texture_rid)) {
7194
print_line(String(" - ") + resource_names[texture_rid]);
7195
}
7196
#endif
7197
free_rid(texture_rid);
7198
} else {
7199
owned_non_shared.push_back(texture_rid);
7200
}
7201
}
7202
// Free non-shared textures second; this avoids errors from trying to free non-existing textures due to dependencies.
7203
for (const RID &texture_rid : owned_non_shared) {
7204
#ifdef DEV_ENABLED
7205
if (resource_names.has(texture_rid)) {
7206
print_line(String(" - ") + resource_names[texture_rid]);
7207
}
7208
#endif
7209
free_rid(texture_rid);
7210
}
7211
}
7212
}
7213
7214
// Erase the transfer workers after all resources have been freed.
7215
_free_transfer_workers();
7216
7217
// Free everything pending.
7218
for (uint32_t i = 0; i < frames.size(); i++) {
7219
int f = (frame + i) % frames.size();
7220
_free_pending_resources(f);
7221
driver->command_pool_free(frames[i].command_pool);
7222
driver->timestamp_query_pool_free(frames[i].timestamp_pool);
7223
driver->semaphore_free(frames[i].semaphore);
7224
driver->fence_free(frames[i].fence);
7225
7226
RDG::CommandBufferPool &buffer_pool = frames[i].command_buffer_pool;
7227
for (uint32_t j = 0; j < buffer_pool.buffers.size(); j++) {
7228
driver->semaphore_free(buffer_pool.semaphores[j]);
7229
}
7230
7231
for (uint32_t j = 0; j < frames[i].transfer_worker_semaphores.size(); j++) {
7232
driver->semaphore_free(frames[i].transfer_worker_semaphores[j]);
7233
}
7234
}
7235
7236
if (pipeline_cache_enabled) {
7237
_update_pipeline_cache(true);
7238
driver->pipeline_cache_free();
7239
}
7240
7241
frames.clear();
7242
7243
for (int i = 0; i < upload_staging_buffers.blocks.size(); i++) {
7244
driver->buffer_free(upload_staging_buffers.blocks[i].driver_id);
7245
}
7246
7247
for (int i = 0; i < download_staging_buffers.blocks.size(); i++) {
7248
driver->buffer_free(download_staging_buffers.blocks[i].driver_id);
7249
}
7250
7251
while (vertex_formats.size()) {
7252
HashMap<VertexFormatID, VertexDescriptionCache>::Iterator temp = vertex_formats.begin();
7253
driver->vertex_format_free(temp->value.driver_id);
7254
vertex_formats.remove(temp);
7255
}
7256
7257
for (KeyValue<FramebufferFormatID, FramebufferFormat> &E : framebuffer_formats) {
7258
driver->render_pass_free(E.value.render_pass);
7259
}
7260
framebuffer_formats.clear();
7261
7262
// Delete the swap chains created for the screens.
7263
for (const KeyValue<DisplayServer::WindowID, RDD::SwapChainID> &it : screen_swap_chains) {
7264
driver->swap_chain_free(it.value);
7265
}
7266
7267
screen_swap_chains.clear();
7268
7269
// Delete the command queues.
7270
if (present_queue) {
7271
if (main_queue != present_queue) {
7272
// Only delete the present queue if it's unique.
7273
driver->command_queue_free(present_queue);
7274
}
7275
7276
present_queue = RDD::CommandQueueID();
7277
}
7278
7279
if (transfer_queue) {
7280
if (main_queue != transfer_queue) {
7281
// Only delete the transfer queue if it's unique.
7282
driver->command_queue_free(transfer_queue);
7283
}
7284
7285
transfer_queue = RDD::CommandQueueID();
7286
}
7287
7288
if (main_queue) {
7289
driver->command_queue_free(main_queue);
7290
main_queue = RDD::CommandQueueID();
7291
}
7292
7293
// Delete the driver once everything else has been deleted.
7294
if (driver != nullptr) {
7295
context->driver_free(driver);
7296
driver = nullptr;
7297
}
7298
7299
// All these should be clear at this point.
7300
ERR_FAIL_COND(dependency_map.size());
7301
ERR_FAIL_COND(reverse_dependency_map.size());
7302
}
7303
7304
void RenderingDevice::_set_max_fps(int p_max_fps) {
7305
for (const KeyValue<DisplayServer::WindowID, RDD::SwapChainID> &it : screen_swap_chains) {
7306
driver->swap_chain_set_max_fps(it.value, p_max_fps);
7307
}
7308
}
7309
7310
RenderingDevice *RenderingDevice::create_local_device() {
7311
RenderingDevice *rd = memnew(RenderingDevice);
7312
if (rd->initialize(context) != OK) {
7313
memdelete(rd);
7314
return nullptr;
7315
}
7316
return rd;
7317
}
7318
7319
bool RenderingDevice::has_feature(const Features p_feature) const {
7320
// Some features can be deduced from the capabilities alone, without querying the driver.
7321
switch (p_feature) {
7322
case SUPPORTS_MULTIVIEW: {
7323
const RDD::MultiviewCapabilities &multiview_capabilities = driver->get_multiview_capabilities();
7324
return multiview_capabilities.is_supported && multiview_capabilities.max_view_count > 1;
7325
}
7326
case SUPPORTS_ATTACHMENT_VRS: {
7327
const RDD::FragmentShadingRateCapabilities &fsr_capabilities = driver->get_fragment_shading_rate_capabilities();
7328
const RDD::FragmentDensityMapCapabilities &fdm_capabilities = driver->get_fragment_density_map_capabilities();
7329
return fsr_capabilities.attachment_supported || fdm_capabilities.attachment_supported;
7330
}
7331
default:
7332
return driver->has_feature(p_feature);
7333
}
7334
}
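// A minimal usage sketch (not part of the engine): gating a stereo render path on
// multiview support, assuming `rd` is a valid RenderingDevice pointer.
//
//   if (rd->has_feature(RenderingDevice::SUPPORTS_MULTIVIEW)) {
//       // Render both views in a single multiview pass.
//   } else {
//       // Fall back to rendering each eye in a separate pass.
//   }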
7335
7336
void RenderingDevice::_bind_methods() {
7337
ClassDB::bind_method(D_METHOD("texture_create", "format", "view", "data"), &RenderingDevice::_texture_create, DEFVAL(Array()));
7338
ClassDB::bind_method(D_METHOD("texture_create_shared", "view", "with_texture"), &RenderingDevice::_texture_create_shared);
7339
ClassDB::bind_method(D_METHOD("texture_create_shared_from_slice", "view", "with_texture", "layer", "mipmap", "mipmaps", "slice_type"), &RenderingDevice::_texture_create_shared_from_slice, DEFVAL(1), DEFVAL(TEXTURE_SLICE_2D));
7340
ClassDB::bind_method(D_METHOD("texture_create_from_extension", "type", "format", "samples", "usage_flags", "image", "width", "height", "depth", "layers", "mipmaps"), &RenderingDevice::texture_create_from_extension, DEFVAL(1));
7341
7342
ClassDB::bind_method(D_METHOD("texture_update", "texture", "layer", "data"), &RenderingDevice::texture_update);
7343
ClassDB::bind_method(D_METHOD("texture_get_data", "texture", "layer"), &RenderingDevice::texture_get_data);
7344
ClassDB::bind_method(D_METHOD("texture_get_data_async", "texture", "layer", "callback"), &RenderingDevice::texture_get_data_async);
7345
7346
ClassDB::bind_method(D_METHOD("texture_is_format_supported_for_usage", "format", "usage_flags"), &RenderingDevice::texture_is_format_supported_for_usage);
7347
7348
ClassDB::bind_method(D_METHOD("texture_is_shared", "texture"), &RenderingDevice::texture_is_shared);
7349
ClassDB::bind_method(D_METHOD("texture_is_valid", "texture"), &RenderingDevice::texture_is_valid);
7350
7351
ClassDB::bind_method(D_METHOD("texture_set_discardable", "texture", "discardable"), &RenderingDevice::texture_set_discardable);
7352
ClassDB::bind_method(D_METHOD("texture_is_discardable", "texture"), &RenderingDevice::texture_is_discardable);
7353
7354
ClassDB::bind_method(D_METHOD("texture_copy", "from_texture", "to_texture", "from_pos", "to_pos", "size", "src_mipmap", "dst_mipmap", "src_layer", "dst_layer"), &RenderingDevice::texture_copy);
7355
ClassDB::bind_method(D_METHOD("texture_clear", "texture", "color", "base_mipmap", "mipmap_count", "base_layer", "layer_count"), &RenderingDevice::texture_clear);
7356
ClassDB::bind_method(D_METHOD("texture_resolve_multisample", "from_texture", "to_texture"), &RenderingDevice::texture_resolve_multisample);
7357
7358
ClassDB::bind_method(D_METHOD("texture_get_format", "texture"), &RenderingDevice::_texture_get_format);
7359
#ifndef DISABLE_DEPRECATED
7360
ClassDB::bind_method(D_METHOD("texture_get_native_handle", "texture"), &RenderingDevice::texture_get_native_handle);
7361
#endif
7362
7363
ClassDB::bind_method(D_METHOD("framebuffer_format_create", "attachments", "view_count"), &RenderingDevice::_framebuffer_format_create, DEFVAL(1));
7364
ClassDB::bind_method(D_METHOD("framebuffer_format_create_multipass", "attachments", "passes", "view_count"), &RenderingDevice::_framebuffer_format_create_multipass, DEFVAL(1));
7365
ClassDB::bind_method(D_METHOD("framebuffer_format_create_empty", "samples"), &RenderingDevice::framebuffer_format_create_empty, DEFVAL(TEXTURE_SAMPLES_1));
7366
ClassDB::bind_method(D_METHOD("framebuffer_format_get_texture_samples", "format", "render_pass"), &RenderingDevice::framebuffer_format_get_texture_samples, DEFVAL(0));
7367
ClassDB::bind_method(D_METHOD("framebuffer_create", "textures", "validate_with_format", "view_count"), &RenderingDevice::_framebuffer_create, DEFVAL(INVALID_FORMAT_ID), DEFVAL(1));
7368
ClassDB::bind_method(D_METHOD("framebuffer_create_multipass", "textures", "passes", "validate_with_format", "view_count"), &RenderingDevice::_framebuffer_create_multipass, DEFVAL(INVALID_FORMAT_ID), DEFVAL(1));
7369
ClassDB::bind_method(D_METHOD("framebuffer_create_empty", "size", "samples", "validate_with_format"), &RenderingDevice::framebuffer_create_empty, DEFVAL(TEXTURE_SAMPLES_1), DEFVAL(INVALID_FORMAT_ID));
7370
ClassDB::bind_method(D_METHOD("framebuffer_get_format", "framebuffer"), &RenderingDevice::framebuffer_get_format);
7371
ClassDB::bind_method(D_METHOD("framebuffer_is_valid", "framebuffer"), &RenderingDevice::framebuffer_is_valid);
7372
7373
ClassDB::bind_method(D_METHOD("sampler_create", "state"), &RenderingDevice::_sampler_create);
7374
ClassDB::bind_method(D_METHOD("sampler_is_format_supported_for_filter", "format", "sampler_filter"), &RenderingDevice::sampler_is_format_supported_for_filter);
7375
7376
ClassDB::bind_method(D_METHOD("vertex_buffer_create", "size_bytes", "data", "creation_bits"), &RenderingDevice::_vertex_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(0));
7377
ClassDB::bind_method(D_METHOD("vertex_format_create", "vertex_descriptions"), &RenderingDevice::_vertex_format_create);
7378
ClassDB::bind_method(D_METHOD("vertex_array_create", "vertex_count", "vertex_format", "src_buffers", "offsets"), &RenderingDevice::_vertex_array_create, DEFVAL(Vector<int64_t>()));
7379
7380
ClassDB::bind_method(D_METHOD("index_buffer_create", "size_indices", "format", "data", "use_restart_indices", "creation_bits"), &RenderingDevice::_index_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(false), DEFVAL(0));
7381
ClassDB::bind_method(D_METHOD("index_array_create", "index_buffer", "index_offset", "index_count"), &RenderingDevice::index_array_create);
7382
7383
ClassDB::bind_method(D_METHOD("shader_compile_spirv_from_source", "shader_source", "allow_cache"), &RenderingDevice::_shader_compile_spirv_from_source, DEFVAL(true));
7384
ClassDB::bind_method(D_METHOD("shader_compile_binary_from_spirv", "spirv_data", "name"), &RenderingDevice::_shader_compile_binary_from_spirv, DEFVAL(""));
7385
ClassDB::bind_method(D_METHOD("shader_create_from_spirv", "spirv_data", "name"), &RenderingDevice::_shader_create_from_spirv, DEFVAL(""));
7386
ClassDB::bind_method(D_METHOD("shader_create_from_bytecode", "binary_data", "placeholder_rid"), &RenderingDevice::shader_create_from_bytecode, DEFVAL(RID()));
7387
ClassDB::bind_method(D_METHOD("shader_create_placeholder"), &RenderingDevice::shader_create_placeholder);
7388
7389
ClassDB::bind_method(D_METHOD("shader_get_vertex_input_attribute_mask", "shader"), &RenderingDevice::shader_get_vertex_input_attribute_mask);
7390
7391
ClassDB::bind_method(D_METHOD("uniform_buffer_create", "size_bytes", "data", "creation_bits"), &RenderingDevice::_uniform_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(0));
7392
ClassDB::bind_method(D_METHOD("storage_buffer_create", "size_bytes", "data", "usage", "creation_bits"), &RenderingDevice::_storage_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(0), DEFVAL(0));
7393
ClassDB::bind_method(D_METHOD("texture_buffer_create", "size_bytes", "format", "data"), &RenderingDevice::_texture_buffer_create, DEFVAL(Vector<uint8_t>()));
7394
7395
ClassDB::bind_method(D_METHOD("uniform_set_create", "uniforms", "shader", "shader_set"), &RenderingDevice::_uniform_set_create);
7396
ClassDB::bind_method(D_METHOD("uniform_set_is_valid", "uniform_set"), &RenderingDevice::uniform_set_is_valid);
7397
7398
ClassDB::bind_method(D_METHOD("buffer_copy", "src_buffer", "dst_buffer", "src_offset", "dst_offset", "size"), &RenderingDevice::buffer_copy);
7399
ClassDB::bind_method(D_METHOD("buffer_update", "buffer", "offset", "size_bytes", "data"), &RenderingDevice::_buffer_update_bind);
7400
ClassDB::bind_method(D_METHOD("buffer_clear", "buffer", "offset", "size_bytes"), &RenderingDevice::buffer_clear);
7401
ClassDB::bind_method(D_METHOD("buffer_get_data", "buffer", "offset_bytes", "size_bytes"), &RenderingDevice::buffer_get_data, DEFVAL(0), DEFVAL(0));
7402
ClassDB::bind_method(D_METHOD("buffer_get_data_async", "buffer", "callback", "offset_bytes", "size_bytes"), &RenderingDevice::buffer_get_data_async, DEFVAL(0), DEFVAL(0));
7403
ClassDB::bind_method(D_METHOD("buffer_get_device_address", "buffer"), &RenderingDevice::buffer_get_device_address);
7404
7405
ClassDB::bind_method(D_METHOD("render_pipeline_create", "shader", "framebuffer_format", "vertex_format", "primitive", "rasterization_state", "multisample_state", "stencil_state", "color_blend_state", "dynamic_state_flags", "for_render_pass", "specialization_constants"), &RenderingDevice::_render_pipeline_create, DEFVAL(0), DEFVAL(0), DEFVAL(TypedArray<RDPipelineSpecializationConstant>()));
7406
ClassDB::bind_method(D_METHOD("render_pipeline_is_valid", "render_pipeline"), &RenderingDevice::render_pipeline_is_valid);
7407
7408
ClassDB::bind_method(D_METHOD("compute_pipeline_create", "shader", "specialization_constants"), &RenderingDevice::_compute_pipeline_create, DEFVAL(TypedArray<RDPipelineSpecializationConstant>()));
7409
ClassDB::bind_method(D_METHOD("compute_pipeline_is_valid", "compute_pipeline"), &RenderingDevice::compute_pipeline_is_valid);
7410
7411
ClassDB::bind_method(D_METHOD("screen_get_width", "screen"), &RenderingDevice::screen_get_width, DEFVAL(DisplayServer::MAIN_WINDOW_ID));
7412
ClassDB::bind_method(D_METHOD("screen_get_height", "screen"), &RenderingDevice::screen_get_height, DEFVAL(DisplayServer::MAIN_WINDOW_ID));
7413
ClassDB::bind_method(D_METHOD("screen_get_framebuffer_format", "screen"), &RenderingDevice::screen_get_framebuffer_format, DEFVAL(DisplayServer::MAIN_WINDOW_ID));
7414
7415
ClassDB::bind_method(D_METHOD("draw_list_begin_for_screen", "screen", "clear_color"), &RenderingDevice::draw_list_begin_for_screen, DEFVAL(DisplayServer::MAIN_WINDOW_ID), DEFVAL(Color()));
7416
7417
ClassDB::bind_method(D_METHOD("draw_list_begin", "framebuffer", "draw_flags", "clear_color_values", "clear_depth_value", "clear_stencil_value", "region", "breadcrumb"), &RenderingDevice::_draw_list_begin_bind, DEFVAL(DRAW_DEFAULT_ALL), DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()), DEFVAL(0));
7418
#ifndef DISABLE_DEPRECATED
7419
ClassDB::bind_method(D_METHOD("draw_list_begin_split", "framebuffer", "splits", "initial_color_action", "final_color_action", "initial_depth_action", "final_depth_action", "clear_color_values", "clear_depth", "clear_stencil", "region", "storage_textures"), &RenderingDevice::_draw_list_begin_split, DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()), DEFVAL(TypedArray<RID>()));
7420
#endif
7421
7422
ClassDB::bind_method(D_METHOD("draw_list_set_blend_constants", "draw_list", "color"), &RenderingDevice::draw_list_set_blend_constants);
7423
ClassDB::bind_method(D_METHOD("draw_list_bind_render_pipeline", "draw_list", "render_pipeline"), &RenderingDevice::draw_list_bind_render_pipeline);
7424
ClassDB::bind_method(D_METHOD("draw_list_bind_uniform_set", "draw_list", "uniform_set", "set_index"), &RenderingDevice::draw_list_bind_uniform_set);
7425
ClassDB::bind_method(D_METHOD("draw_list_bind_vertex_array", "draw_list", "vertex_array"), &RenderingDevice::draw_list_bind_vertex_array);
7426
ClassDB::bind_method(D_METHOD("draw_list_bind_index_array", "draw_list", "index_array"), &RenderingDevice::draw_list_bind_index_array);
7427
ClassDB::bind_method(D_METHOD("draw_list_set_push_constant", "draw_list", "buffer", "size_bytes"), &RenderingDevice::_draw_list_set_push_constant);
7428
7429
ClassDB::bind_method(D_METHOD("draw_list_draw", "draw_list", "use_indices", "instances", "procedural_vertex_count"), &RenderingDevice::draw_list_draw, DEFVAL(0));
7430
ClassDB::bind_method(D_METHOD("draw_list_draw_indirect", "draw_list", "use_indices", "buffer", "offset", "draw_count", "stride"), &RenderingDevice::draw_list_draw_indirect, DEFVAL(0), DEFVAL(1), DEFVAL(0));
7431
7432
ClassDB::bind_method(D_METHOD("draw_list_enable_scissor", "draw_list", "rect"), &RenderingDevice::draw_list_enable_scissor, DEFVAL(Rect2()));
7433
ClassDB::bind_method(D_METHOD("draw_list_disable_scissor", "draw_list"), &RenderingDevice::draw_list_disable_scissor);
7434
7435
ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass"), &RenderingDevice::draw_list_switch_to_next_pass);
7436
#ifndef DISABLE_DEPRECATED
7437
ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass_split", "splits"), &RenderingDevice::_draw_list_switch_to_next_pass_split);
7438
#endif
7439
7440
ClassDB::bind_method(D_METHOD("draw_list_end"), &RenderingDevice::draw_list_end);
7441
7442
ClassDB::bind_method(D_METHOD("compute_list_begin"), &RenderingDevice::compute_list_begin);
7443
ClassDB::bind_method(D_METHOD("compute_list_bind_compute_pipeline", "compute_list", "compute_pipeline"), &RenderingDevice::compute_list_bind_compute_pipeline);
7444
ClassDB::bind_method(D_METHOD("compute_list_set_push_constant", "compute_list", "buffer", "size_bytes"), &RenderingDevice::_compute_list_set_push_constant);
7445
ClassDB::bind_method(D_METHOD("compute_list_bind_uniform_set", "compute_list", "uniform_set", "set_index"), &RenderingDevice::compute_list_bind_uniform_set);
7446
ClassDB::bind_method(D_METHOD("compute_list_dispatch", "compute_list", "x_groups", "y_groups", "z_groups"), &RenderingDevice::compute_list_dispatch);
7447
ClassDB::bind_method(D_METHOD("compute_list_dispatch_indirect", "compute_list", "buffer", "offset"), &RenderingDevice::compute_list_dispatch_indirect);
7448
ClassDB::bind_method(D_METHOD("compute_list_add_barrier", "compute_list"), &RenderingDevice::compute_list_add_barrier);
7449
ClassDB::bind_method(D_METHOD("compute_list_end"), &RenderingDevice::compute_list_end);
7450
7451
ClassDB::bind_method(D_METHOD("free_rid", "rid"), &RenderingDevice::free_rid);
7452
7453
ClassDB::bind_method(D_METHOD("capture_timestamp", "name"), &RenderingDevice::capture_timestamp);
7454
ClassDB::bind_method(D_METHOD("get_captured_timestamps_count"), &RenderingDevice::get_captured_timestamps_count);
7455
ClassDB::bind_method(D_METHOD("get_captured_timestamps_frame"), &RenderingDevice::get_captured_timestamps_frame);
7456
ClassDB::bind_method(D_METHOD("get_captured_timestamp_gpu_time", "index"), &RenderingDevice::get_captured_timestamp_gpu_time);
7457
ClassDB::bind_method(D_METHOD("get_captured_timestamp_cpu_time", "index"), &RenderingDevice::get_captured_timestamp_cpu_time);
7458
ClassDB::bind_method(D_METHOD("get_captured_timestamp_name", "index"), &RenderingDevice::get_captured_timestamp_name);
7459
7460
ClassDB::bind_method(D_METHOD("has_feature", "feature"), &RenderingDevice::has_feature);
7461
ClassDB::bind_method(D_METHOD("limit_get", "limit"), &RenderingDevice::limit_get);
7462
ClassDB::bind_method(D_METHOD("get_frame_delay"), &RenderingDevice::get_frame_delay);
7463
ClassDB::bind_method(D_METHOD("submit"), &RenderingDevice::submit);
7464
ClassDB::bind_method(D_METHOD("sync"), &RenderingDevice::sync);
7465
7466
#ifndef DISABLE_DEPRECATED
7467
ClassDB::bind_method(D_METHOD("barrier", "from", "to"), &RenderingDevice::barrier, DEFVAL(BARRIER_MASK_ALL_BARRIERS), DEFVAL(BARRIER_MASK_ALL_BARRIERS));
7468
ClassDB::bind_method(D_METHOD("full_barrier"), &RenderingDevice::full_barrier);
7469
#endif
7470
7471
ClassDB::bind_method(D_METHOD("create_local_device"), &RenderingDevice::create_local_device);
7472
7473
ClassDB::bind_method(D_METHOD("set_resource_name", "id", "name"), &RenderingDevice::set_resource_name);
7474
7475
ClassDB::bind_method(D_METHOD("draw_command_begin_label", "name", "color"), &RenderingDevice::_draw_command_begin_label);
7476
#ifndef DISABLE_DEPRECATED
7477
ClassDB::bind_method(D_METHOD("draw_command_insert_label", "name", "color"), &RenderingDevice::draw_command_insert_label);
7478
#endif
7479
ClassDB::bind_method(D_METHOD("draw_command_end_label"), &RenderingDevice::draw_command_end_label);
7480
7481
ClassDB::bind_method(D_METHOD("get_device_vendor_name"), &RenderingDevice::get_device_vendor_name);
7482
ClassDB::bind_method(D_METHOD("get_device_name"), &RenderingDevice::get_device_name);
7483
ClassDB::bind_method(D_METHOD("get_device_pipeline_cache_uuid"), &RenderingDevice::get_device_pipeline_cache_uuid);
7484
7485
ClassDB::bind_method(D_METHOD("get_memory_usage", "type"), &RenderingDevice::get_memory_usage);
7486
7487
ClassDB::bind_method(D_METHOD("get_driver_resource", "resource", "rid", "index"), &RenderingDevice::get_driver_resource);
7488
7489
ClassDB::bind_method(D_METHOD("get_perf_report"), &RenderingDevice::get_perf_report);
7490
7491
ClassDB::bind_method(D_METHOD("get_driver_and_device_memory_report"), &RenderingDevice::get_driver_and_device_memory_report);
7492
ClassDB::bind_method(D_METHOD("get_tracked_object_name", "type_index"), &RenderingDevice::get_tracked_object_name);
7493
ClassDB::bind_method(D_METHOD("get_tracked_object_type_count"), &RenderingDevice::get_tracked_object_type_count);
7494
ClassDB::bind_method(D_METHOD("get_driver_total_memory"), &RenderingDevice::get_driver_total_memory);
7495
ClassDB::bind_method(D_METHOD("get_driver_allocation_count"), &RenderingDevice::get_driver_allocation_count);
7496
ClassDB::bind_method(D_METHOD("get_driver_memory_by_object_type", "type"), &RenderingDevice::get_driver_memory_by_object_type);
7497
ClassDB::bind_method(D_METHOD("get_driver_allocs_by_object_type", "type"), &RenderingDevice::get_driver_allocs_by_object_type);
7498
ClassDB::bind_method(D_METHOD("get_device_total_memory"), &RenderingDevice::get_device_total_memory);
7499
ClassDB::bind_method(D_METHOD("get_device_allocation_count"), &RenderingDevice::get_device_allocation_count);
7500
ClassDB::bind_method(D_METHOD("get_device_memory_by_object_type", "type"), &RenderingDevice::get_device_memory_by_object_type);
7501
ClassDB::bind_method(D_METHOD("get_device_allocs_by_object_type", "type"), &RenderingDevice::get_device_allocs_by_object_type);
7502
7503
BIND_ENUM_CONSTANT(DEVICE_TYPE_OTHER);
BIND_ENUM_CONSTANT(DEVICE_TYPE_INTEGRATED_GPU);
BIND_ENUM_CONSTANT(DEVICE_TYPE_DISCRETE_GPU);
BIND_ENUM_CONSTANT(DEVICE_TYPE_VIRTUAL_GPU);
BIND_ENUM_CONSTANT(DEVICE_TYPE_CPU);
BIND_ENUM_CONSTANT(DEVICE_TYPE_MAX);

BIND_ENUM_CONSTANT(DRIVER_RESOURCE_LOGICAL_DEVICE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_PHYSICAL_DEVICE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TOPMOST_OBJECT);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_COMMAND_QUEUE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_QUEUE_FAMILY);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TEXTURE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TEXTURE_VIEW);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TEXTURE_DATA_FORMAT);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_SAMPLER);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_UNIFORM_SET);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_BUFFER);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_COMPUTE_PIPELINE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_RENDER_PIPELINE);
#ifndef DISABLE_DEPRECATED
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_DEVICE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_PHYSICAL_DEVICE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_INSTANCE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_QUEUE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_QUEUE_FAMILY_INDEX);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_IMAGE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_IMAGE_VIEW);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_IMAGE_NATIVE_TEXTURE_FORMAT);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_SAMPLER);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_DESCRIPTOR_SET);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_BUFFER);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_COMPUTE_PIPELINE);
BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_RENDER_PIPELINE);
#endif

BIND_ENUM_CONSTANT(DATA_FORMAT_R4G4_UNORM_PACK8);
BIND_ENUM_CONSTANT(DATA_FORMAT_R4G4B4A4_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_B4G4R4A4_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R5G6B5_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_B5G6R5_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R5G5B5A1_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_B5G5R5A1_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_A1R5G5B5_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SRGB);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SRGB);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SRGB);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SRGB);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SRGB);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SRGB);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_UNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_USCALED_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SSCALED_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_UINT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SINT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SRGB_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_UNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_SNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_USCALED_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_SSCALED_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_UINT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_SINT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_UNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_SNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_USCALED_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_SSCALED_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_UINT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_SINT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_USCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SSCALED);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32A32_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32A32_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32A32_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64A64_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64A64_SINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64A64_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_B10G11R11_UFLOAT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_D16_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_X8_D24_UNORM_PACK32);
BIND_ENUM_CONSTANT(DATA_FORMAT_D32_SFLOAT);
BIND_ENUM_CONSTANT(DATA_FORMAT_S8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_D16_UNORM_S8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_D24_UNORM_S8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_D32_SFLOAT_S8_UINT);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGB_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGB_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGBA_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGBA_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC2_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC2_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC3_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC3_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC4_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC4_SNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC5_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC5_SNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC6H_UFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC6H_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC7_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_BC7_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11_SNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11G11_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11G11_SNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_4x4_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_4x4_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x4_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x4_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x5_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x5_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x5_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x5_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x6_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x6_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x5_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x5_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x6_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x6_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x8_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x8_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x5_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x5_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x6_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x6_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x8_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x8_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x10_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x10_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x10_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x10_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x12_UNORM_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x12_SRGB_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_G8B8G8R8_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8G8_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_R10X6_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R10X6G10X6_UNORM_2PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R12X4_UNORM_PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R12X4G12X4_UNORM_2PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16);
BIND_ENUM_CONSTANT(DATA_FORMAT_G16B16G16R16_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_B16G16R16G16_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_4x4_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x4_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x5_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x5_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x6_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x5_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x6_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x8_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x5_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x6_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x8_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x10_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x10_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x12_SFLOAT_BLOCK);
BIND_ENUM_CONSTANT(DATA_FORMAT_MAX);

#ifndef DISABLE_DEPRECATED
BIND_BITFIELD_FLAG(BARRIER_MASK_VERTEX);
BIND_BITFIELD_FLAG(BARRIER_MASK_FRAGMENT);
BIND_BITFIELD_FLAG(BARRIER_MASK_COMPUTE);
BIND_BITFIELD_FLAG(BARRIER_MASK_TRANSFER);
BIND_BITFIELD_FLAG(BARRIER_MASK_RASTER);
BIND_BITFIELD_FLAG(BARRIER_MASK_ALL_BARRIERS);
BIND_BITFIELD_FLAG(BARRIER_MASK_NO_BARRIER);
#endif

BIND_ENUM_CONSTANT(TEXTURE_TYPE_1D);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_2D);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_3D);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_CUBE);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_1D_ARRAY);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_2D_ARRAY);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_CUBE_ARRAY);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_MAX);

BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_1);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_2);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_4);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_8);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_16);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_32);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_64);
BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_MAX);

BIND_BITFIELD_FLAG(TEXTURE_USAGE_SAMPLING_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_STORAGE_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_STORAGE_ATOMIC_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_CPU_READ_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_CAN_UPDATE_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_CAN_COPY_FROM_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_CAN_COPY_TO_BIT);
BIND_BITFIELD_FLAG(TEXTURE_USAGE_INPUT_ATTACHMENT_BIT);

BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_IDENTITY);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_ZERO);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_ONE);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_R);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_G);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_B);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_A);
BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_MAX);

BIND_ENUM_CONSTANT(TEXTURE_SLICE_2D);
BIND_ENUM_CONSTANT(TEXTURE_SLICE_CUBEMAP);
BIND_ENUM_CONSTANT(TEXTURE_SLICE_3D);

BIND_ENUM_CONSTANT(SAMPLER_FILTER_NEAREST);
BIND_ENUM_CONSTANT(SAMPLER_FILTER_LINEAR);
BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_REPEAT);
BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_MIRRORED_REPEAT);
BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE);
BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_CLAMP_TO_BORDER);
BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_MIRROR_CLAMP_TO_EDGE);
BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_MAX);

BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK);
BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_INT_TRANSPARENT_BLACK);
BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_BLACK);
BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_INT_OPAQUE_BLACK);
BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_WHITE);
BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_INT_OPAQUE_WHITE);
BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_MAX);

BIND_ENUM_CONSTANT(VERTEX_FREQUENCY_VERTEX);
BIND_ENUM_CONSTANT(VERTEX_FREQUENCY_INSTANCE);

BIND_ENUM_CONSTANT(INDEX_BUFFER_FORMAT_UINT16);
BIND_ENUM_CONSTANT(INDEX_BUFFER_FORMAT_UINT32);

BIND_BITFIELD_FLAG(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT);

BIND_BITFIELD_FLAG(BUFFER_CREATION_DEVICE_ADDRESS_BIT);
BIND_BITFIELD_FLAG(BUFFER_CREATION_AS_STORAGE_BIT);

BIND_ENUM_CONSTANT(UNIFORM_TYPE_SAMPLER); //for sampling only (sampler GLSL type)
BIND_ENUM_CONSTANT(UNIFORM_TYPE_SAMPLER_WITH_TEXTURE); // for sampling only, but includes a texture (samplerXX GLSL type); first a sampler, then a texture
BIND_ENUM_CONSTANT(UNIFORM_TYPE_TEXTURE); //only texture (textureXX GLSL type)
BIND_ENUM_CONSTANT(UNIFORM_TYPE_IMAGE); // storage image (imageXX GLSL type), for compute mostly
BIND_ENUM_CONSTANT(UNIFORM_TYPE_TEXTURE_BUFFER); // buffer texture (or TBO, textureBuffer type)
BIND_ENUM_CONSTANT(UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER); // buffer texture with a sampler (or TBO, samplerBuffer type)
BIND_ENUM_CONSTANT(UNIFORM_TYPE_IMAGE_BUFFER); //texel buffer (imageBuffer type), for compute mostly
BIND_ENUM_CONSTANT(UNIFORM_TYPE_UNIFORM_BUFFER); //regular uniform buffer (or UBO).
BIND_ENUM_CONSTANT(UNIFORM_TYPE_STORAGE_BUFFER); //storage buffer ("buffer" qualifier) like UBO, but supports storage, for compute mostly
BIND_ENUM_CONSTANT(UNIFORM_TYPE_INPUT_ATTACHMENT); //used for sub-pass read/write, for mobile mostly
BIND_ENUM_CONSTANT(UNIFORM_TYPE_MAX);

BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_POINTS);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINES);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINES_WITH_ADJACENCY);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINESTRIPS);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINESTRIPS_WITH_ADJACENCY);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLES);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLES_WITH_ADJACENCY);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLE_STRIPS);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_AJACENCY);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TESSELATION_PATCH);
BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_MAX);

BIND_ENUM_CONSTANT(POLYGON_CULL_DISABLED);
BIND_ENUM_CONSTANT(POLYGON_CULL_FRONT);
BIND_ENUM_CONSTANT(POLYGON_CULL_BACK);

BIND_ENUM_CONSTANT(POLYGON_FRONT_FACE_CLOCKWISE);
BIND_ENUM_CONSTANT(POLYGON_FRONT_FACE_COUNTER_CLOCKWISE);

BIND_ENUM_CONSTANT(STENCIL_OP_KEEP);
BIND_ENUM_CONSTANT(STENCIL_OP_ZERO);
BIND_ENUM_CONSTANT(STENCIL_OP_REPLACE);
BIND_ENUM_CONSTANT(STENCIL_OP_INCREMENT_AND_CLAMP);
BIND_ENUM_CONSTANT(STENCIL_OP_DECREMENT_AND_CLAMP);
BIND_ENUM_CONSTANT(STENCIL_OP_INVERT);
BIND_ENUM_CONSTANT(STENCIL_OP_INCREMENT_AND_WRAP);
BIND_ENUM_CONSTANT(STENCIL_OP_DECREMENT_AND_WRAP);
BIND_ENUM_CONSTANT(STENCIL_OP_MAX); //not an actual operator, just the amount of operators :D

BIND_ENUM_CONSTANT(COMPARE_OP_NEVER);
BIND_ENUM_CONSTANT(COMPARE_OP_LESS);
BIND_ENUM_CONSTANT(COMPARE_OP_EQUAL);
BIND_ENUM_CONSTANT(COMPARE_OP_LESS_OR_EQUAL);
BIND_ENUM_CONSTANT(COMPARE_OP_GREATER);
BIND_ENUM_CONSTANT(COMPARE_OP_NOT_EQUAL);
BIND_ENUM_CONSTANT(COMPARE_OP_GREATER_OR_EQUAL);
BIND_ENUM_CONSTANT(COMPARE_OP_ALWAYS);
BIND_ENUM_CONSTANT(COMPARE_OP_MAX);

BIND_ENUM_CONSTANT(LOGIC_OP_CLEAR);
BIND_ENUM_CONSTANT(LOGIC_OP_AND);
BIND_ENUM_CONSTANT(LOGIC_OP_AND_REVERSE);
BIND_ENUM_CONSTANT(LOGIC_OP_COPY);
BIND_ENUM_CONSTANT(LOGIC_OP_AND_INVERTED);
BIND_ENUM_CONSTANT(LOGIC_OP_NO_OP);
BIND_ENUM_CONSTANT(LOGIC_OP_XOR);
BIND_ENUM_CONSTANT(LOGIC_OP_OR);
BIND_ENUM_CONSTANT(LOGIC_OP_NOR);
BIND_ENUM_CONSTANT(LOGIC_OP_EQUIVALENT);
BIND_ENUM_CONSTANT(LOGIC_OP_INVERT);
BIND_ENUM_CONSTANT(LOGIC_OP_OR_REVERSE);
BIND_ENUM_CONSTANT(LOGIC_OP_COPY_INVERTED);
BIND_ENUM_CONSTANT(LOGIC_OP_OR_INVERTED);
BIND_ENUM_CONSTANT(LOGIC_OP_NAND);
BIND_ENUM_CONSTANT(LOGIC_OP_SET);
BIND_ENUM_CONSTANT(LOGIC_OP_MAX); //not an actual operator, just the amount of operators :D

BIND_ENUM_CONSTANT(BLEND_FACTOR_ZERO);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE);
BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_DST_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_DST_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_DST_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_DST_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_CONSTANT_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_CONSTANT_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC_ALPHA_SATURATE);
BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC1_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC1_COLOR);
BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC1_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);
BIND_ENUM_CONSTANT(BLEND_FACTOR_MAX);

BIND_ENUM_CONSTANT(BLEND_OP_ADD);
BIND_ENUM_CONSTANT(BLEND_OP_SUBTRACT);
BIND_ENUM_CONSTANT(BLEND_OP_REVERSE_SUBTRACT);
BIND_ENUM_CONSTANT(BLEND_OP_MINIMUM);
BIND_ENUM_CONSTANT(BLEND_OP_MAXIMUM);
BIND_ENUM_CONSTANT(BLEND_OP_MAX);

BIND_BITFIELD_FLAG(DYNAMIC_STATE_LINE_WIDTH);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_DEPTH_BIAS);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_BLEND_CONSTANTS);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_DEPTH_BOUNDS);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_COMPARE_MASK);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_WRITE_MASK);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_REFERENCE);

#ifndef DISABLE_DEPRECATED
BIND_ENUM_CONSTANT(INITIAL_ACTION_LOAD);
BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR);
BIND_ENUM_CONSTANT(INITIAL_ACTION_DISCARD);
BIND_ENUM_CONSTANT(INITIAL_ACTION_MAX);
BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION);
BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION_CONTINUE);
BIND_ENUM_CONSTANT(INITIAL_ACTION_KEEP);
BIND_ENUM_CONSTANT(INITIAL_ACTION_DROP);
BIND_ENUM_CONSTANT(INITIAL_ACTION_CONTINUE);

BIND_ENUM_CONSTANT(FINAL_ACTION_STORE);
BIND_ENUM_CONSTANT(FINAL_ACTION_DISCARD);
BIND_ENUM_CONSTANT(FINAL_ACTION_MAX);
BIND_ENUM_CONSTANT(FINAL_ACTION_READ);
BIND_ENUM_CONSTANT(FINAL_ACTION_CONTINUE);
#endif

BIND_ENUM_CONSTANT(SHADER_STAGE_VERTEX);
BIND_ENUM_CONSTANT(SHADER_STAGE_FRAGMENT);
BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_CONTROL);
BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_EVALUATION);
BIND_ENUM_CONSTANT(SHADER_STAGE_COMPUTE);
BIND_ENUM_CONSTANT(SHADER_STAGE_MAX);
BIND_ENUM_CONSTANT(SHADER_STAGE_VERTEX_BIT);
BIND_ENUM_CONSTANT(SHADER_STAGE_FRAGMENT_BIT);
BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_CONTROL_BIT);
BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_EVALUATION_BIT);
BIND_ENUM_CONSTANT(SHADER_STAGE_COMPUTE_BIT);

BIND_ENUM_CONSTANT(SHADER_LANGUAGE_GLSL);
BIND_ENUM_CONSTANT(SHADER_LANGUAGE_HLSL);

BIND_ENUM_CONSTANT(PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL);
BIND_ENUM_CONSTANT(PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT);
BIND_ENUM_CONSTANT(PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT);

BIND_ENUM_CONSTANT(SUPPORTS_METALFX_SPATIAL);
BIND_ENUM_CONSTANT(SUPPORTS_METALFX_TEMPORAL);
BIND_ENUM_CONSTANT(SUPPORTS_BUFFER_DEVICE_ADDRESS);
BIND_ENUM_CONSTANT(SUPPORTS_IMAGE_ATOMIC_32_BIT);

BIND_ENUM_CONSTANT(LIMIT_MAX_BOUND_UNIFORM_SETS);
BIND_ENUM_CONSTANT(LIMIT_MAX_FRAMEBUFFER_COLOR_ATTACHMENTS);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURES_PER_UNIFORM_SET);
BIND_ENUM_CONSTANT(LIMIT_MAX_SAMPLERS_PER_UNIFORM_SET);
BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_BUFFERS_PER_UNIFORM_SET);
BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_IMAGES_PER_UNIFORM_SET);
BIND_ENUM_CONSTANT(LIMIT_MAX_UNIFORM_BUFFERS_PER_UNIFORM_SET);
BIND_ENUM_CONSTANT(LIMIT_MAX_DRAW_INDEXED_INDEX);
BIND_ENUM_CONSTANT(LIMIT_MAX_FRAMEBUFFER_HEIGHT);
BIND_ENUM_CONSTANT(LIMIT_MAX_FRAMEBUFFER_WIDTH);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_ARRAY_LAYERS);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_1D);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_2D);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_3D);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_CUBE);
BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURES_PER_SHADER_STAGE);
BIND_ENUM_CONSTANT(LIMIT_MAX_SAMPLERS_PER_SHADER_STAGE);
BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_BUFFERS_PER_SHADER_STAGE);
BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_IMAGES_PER_SHADER_STAGE);
BIND_ENUM_CONSTANT(LIMIT_MAX_UNIFORM_BUFFERS_PER_SHADER_STAGE);
BIND_ENUM_CONSTANT(LIMIT_MAX_PUSH_CONSTANT_SIZE);
BIND_ENUM_CONSTANT(LIMIT_MAX_UNIFORM_BUFFER_SIZE);
BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_ATTRIBUTE_OFFSET);
BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_ATTRIBUTES);
BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_BINDINGS);
BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_BINDING_STRIDE);
BIND_ENUM_CONSTANT(LIMIT_MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_SHARED_MEMORY_SIZE);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_INVOCATIONS);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_X);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Y);
BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Z);
BIND_ENUM_CONSTANT(LIMIT_MAX_VIEWPORT_DIMENSIONS_X);
BIND_ENUM_CONSTANT(LIMIT_MAX_VIEWPORT_DIMENSIONS_Y);
BIND_ENUM_CONSTANT(LIMIT_METALFX_TEMPORAL_SCALER_MIN_SCALE);
BIND_ENUM_CONSTANT(LIMIT_METALFX_TEMPORAL_SCALER_MAX_SCALE);

BIND_ENUM_CONSTANT(MEMORY_TEXTURES);
BIND_ENUM_CONSTANT(MEMORY_BUFFERS);
BIND_ENUM_CONSTANT(MEMORY_TOTAL);

BIND_CONSTANT(INVALID_ID);
BIND_CONSTANT(INVALID_FORMAT_ID);

BIND_ENUM_CONSTANT(NONE);
BIND_ENUM_CONSTANT(REFLECTION_PROBES);
BIND_ENUM_CONSTANT(SKY_PASS);
BIND_ENUM_CONSTANT(LIGHTMAPPER_PASS);
BIND_ENUM_CONSTANT(SHADOW_PASS_DIRECTIONAL);
BIND_ENUM_CONSTANT(SHADOW_PASS_CUBE);
BIND_ENUM_CONSTANT(OPAQUE_PASS);
BIND_ENUM_CONSTANT(ALPHA_PASS);
BIND_ENUM_CONSTANT(TRANSPARENT_PASS);
BIND_ENUM_CONSTANT(POST_PROCESSING_PASS);
BIND_ENUM_CONSTANT(BLIT_PASS);
BIND_ENUM_CONSTANT(UI_PASS);
BIND_ENUM_CONSTANT(DEBUG_PASS);

BIND_BITFIELD_FLAG(DRAW_DEFAULT_ALL);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_0);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_1);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_2);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_3);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_4);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_5);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_6);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_7);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_MASK);
BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_ALL);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_0);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_1);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_2);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_3);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_4);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_5);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_6);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_7);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_MASK);
BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_ALL);
BIND_BITFIELD_FLAG(DRAW_CLEAR_DEPTH);
BIND_BITFIELD_FLAG(DRAW_IGNORE_DEPTH);
BIND_BITFIELD_FLAG(DRAW_CLEAR_STENCIL);
BIND_BITFIELD_FLAG(DRAW_IGNORE_STENCIL);
BIND_BITFIELD_FLAG(DRAW_CLEAR_ALL);
BIND_BITFIELD_FLAG(DRAW_IGNORE_ALL);
}

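// Marks the calling thread as the render thread. Functions that enforce the
// render-thread check must be called from this thread afterwards.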
void RenderingDevice::make_current() {
render_thread_id = Thread::get_caller_id();
}

RenderingDevice::~RenderingDevice() {
finalize();

if (singleton == this) {
singleton = nullptr;
}
}

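// The first device constructed becomes the singleton (the main rendering
// device); additional local devices do not replace it.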
RenderingDevice::RenderingDevice() {
if (singleton == nullptr) {
singleton = this;
}

render_thread_id = Thread::get_caller_id();
}

/*****************/
/**** BINDERS ****/
/*****************/

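// These wrappers adapt the script-facing API (Ref<RD*> resource wrappers and
// TypedArray arguments) into the native structs and vectors the core API expects.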
RID RenderingDevice::_texture_create(const Ref<RDTextureFormat> &p_format, const Ref<RDTextureView> &p_view, const TypedArray<PackedByteArray> &p_data) {
ERR_FAIL_COND_V(p_format.is_null(), RID());
ERR_FAIL_COND_V(p_view.is_null(), RID());
Vector<Vector<uint8_t>> data;
for (int i = 0; i < p_data.size(); i++) {
Vector<uint8_t> byte_slice = p_data[i];
ERR_FAIL_COND_V(byte_slice.is_empty(), RID());
data.push_back(byte_slice);
}
return texture_create(p_format->base, p_view->base, data);
}

RID RenderingDevice::_texture_create_shared(const Ref<RDTextureView> &p_view, RID p_with_texture) {
ERR_FAIL_COND_V(p_view.is_null(), RID());

return texture_create_shared(p_view->base, p_with_texture);
}

RID RenderingDevice::_texture_create_shared_from_slice(const Ref<RDTextureView> &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type) {
ERR_FAIL_COND_V(p_view.is_null(), RID());

return texture_create_shared_from_slice(p_view->base, p_with_texture, p_layer, p_mipmap, p_mipmaps, p_slice_type);
}

Ref<RDTextureFormat> RenderingDevice::_texture_get_format(RID p_rd_texture) {
Ref<RDTextureFormat> rtf;
rtf.instantiate();
rtf->base = texture_get_format(p_rd_texture);

return rtf;
}

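// Note the differing validation policies below: a null attachment aborts with
// INVALID_FORMAT_ID, while a null pass in the multipass variants is skipped.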
RenderingDevice::FramebufferFormatID RenderingDevice::_framebuffer_format_create(const TypedArray<RDAttachmentFormat> &p_attachments, uint32_t p_view_count) {
Vector<AttachmentFormat> attachments;
attachments.resize(p_attachments.size());

for (int i = 0; i < p_attachments.size(); i++) {
Ref<RDAttachmentFormat> af = p_attachments[i];
ERR_FAIL_COND_V(af.is_null(), INVALID_FORMAT_ID);
attachments.write[i] = af->base;
}
return framebuffer_format_create(attachments, p_view_count);
}

RenderingDevice::FramebufferFormatID RenderingDevice::_framebuffer_format_create_multipass(const TypedArray<RDAttachmentFormat> &p_attachments, const TypedArray<RDFramebufferPass> &p_passes, uint32_t p_view_count) {
Vector<AttachmentFormat> attachments;
attachments.resize(p_attachments.size());

for (int i = 0; i < p_attachments.size(); i++) {
Ref<RDAttachmentFormat> af = p_attachments[i];
ERR_FAIL_COND_V(af.is_null(), INVALID_FORMAT_ID);
attachments.write[i] = af->base;
}

Vector<FramebufferPass> passes;
for (int i = 0; i < p_passes.size(); i++) {
Ref<RDFramebufferPass> pass = p_passes[i];
ERR_CONTINUE(pass.is_null());
passes.push_back(pass->base);
}

return framebuffer_format_create_multipass(attachments, passes, p_view_count);
}

RID RenderingDevice::_framebuffer_create(const TypedArray<RID> &p_textures, FramebufferFormatID p_format_check, uint32_t p_view_count) {
Vector<RID> textures = Variant(p_textures);
return framebuffer_create(textures, p_format_check, p_view_count);
}

RID RenderingDevice::_framebuffer_create_multipass(const TypedArray<RID> &p_textures, const TypedArray<RDFramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) {
Vector<RID> textures = Variant(p_textures);
Vector<FramebufferPass> passes;
for (int i = 0; i < p_passes.size(); i++) {
Ref<RDFramebufferPass> pass = p_passes[i];
ERR_CONTINUE(pass.is_null());
passes.push_back(pass->base);
}
return framebuffer_create_multipass(textures, passes, p_format_check, p_view_count);
}

RID RenderingDevice::_sampler_create(const Ref<RDSamplerState> &p_state) {
ERR_FAIL_COND_V(p_state.is_null(), RID());

return sampler_create(p_state->base);
}

RenderingDevice::VertexFormatID RenderingDevice::_vertex_format_create(const TypedArray<RDVertexAttribute> &p_vertex_formats) {
Vector<VertexAttribute> descriptions;
descriptions.resize(p_vertex_formats.size());

for (int i = 0; i < p_vertex_formats.size(); i++) {
Ref<RDVertexAttribute> af = p_vertex_formats[i];
ERR_FAIL_COND_V(af.is_null(), INVALID_FORMAT_ID);
descriptions.write[i] = af->base;
}
return vertex_format_create(descriptions);
}

RID RenderingDevice::_vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const TypedArray<RID> &p_src_buffers, const Vector<int64_t> &p_offsets) {
Vector<RID> buffers = Variant(p_src_buffers);

Vector<uint64_t> offsets;
offsets.resize(p_offsets.size());
for (int i = 0; i < p_offsets.size(); i++) {
offsets.write[i] = p_offsets[i];
}

return vertex_array_create(p_vertex_count, p_vertex_format, buffers, offsets);
}

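// Compiles every stage present in the source, recording per-stage bytecode
// and any compile error string on the returned RDShaderSPIRV.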
Ref<RDShaderSPIRV> RenderingDevice::_shader_compile_spirv_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache) {
ERR_FAIL_COND_V(p_source.is_null(), Ref<RDShaderSPIRV>());

Ref<RDShaderSPIRV> bytecode;
bytecode.instantiate();
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
String error;

ShaderStage stage = ShaderStage(i);
String source = p_source->get_stage_source(stage);

if (!source.is_empty()) {
Vector<uint8_t> spirv = shader_compile_spirv_from_source(stage, source, p_source->get_language(), &error, p_allow_cache);
bytecode->set_stage_bytecode(stage, spirv);
bytecode->set_stage_compile_error(stage, error);
}
}
return bytecode;
}

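// Stages with a recorded compile error abort the call; stages with no
// bytecode are simply skipped.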
Vector<uint8_t> RenderingDevice::_shader_compile_binary_from_spirv(const Ref<RDShaderSPIRV> &p_spirv, const String &p_shader_name) {
ERR_FAIL_COND_V(p_spirv.is_null(), Vector<uint8_t>());

Vector<ShaderStageSPIRVData> stage_data;
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
ShaderStage stage = ShaderStage(i);
ShaderStageSPIRVData sd;
sd.shader_stage = stage;
String error = p_spirv->get_stage_compile_error(stage);
ERR_FAIL_COND_V_MSG(!error.is_empty(), Vector<uint8_t>(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
sd.spirv = p_spirv->get_stage_bytecode(stage);
if (sd.spirv.is_empty()) {
continue;
}
stage_data.push_back(sd);
}

return shader_compile_binary_from_spirv(stage_data, p_shader_name);
}

RID RenderingDevice::_shader_create_from_spirv(const Ref<RDShaderSPIRV> &p_spirv, const String &p_shader_name) {
ERR_FAIL_COND_V(p_spirv.is_null(), RID());

Vector<ShaderStageSPIRVData> stage_data;
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
ShaderStage stage = ShaderStage(i);
ShaderStageSPIRVData sd;
sd.shader_stage = stage;
String error = p_spirv->get_stage_compile_error(stage);
ERR_FAIL_COND_V_MSG(!error.is_empty(), RID(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
sd.spirv = p_spirv->get_stage_bytecode(stage);
if (sd.spirv.is_empty()) {
continue;
}
stage_data.push_back(sd);
}
return shader_create_from_spirv(stage_data);
}

RID RenderingDevice::_uniform_set_create(const TypedArray<RDUniform> &p_uniforms, RID p_shader, uint32_t p_shader_set) {
LocalVector<Uniform> uniforms;
uniforms.resize(p_uniforms.size());
for (int i = 0; i < p_uniforms.size(); i++) {
Ref<RDUniform> uniform = p_uniforms[i];
ERR_FAIL_COND_V(uniform.is_null(), RID());
uniforms[i] = uniform->base;
}
return uniform_set_create(uniforms, p_shader, p_shader_set);
}

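// Script-facing wrapper: forwards the packed byte array's raw pointer to
// buffer_update().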
Error RenderingDevice::_buffer_update_bind(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data) {
return buffer_update(p_buffer, p_offset, p_size, p_data.ptr());
}

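// Maps each constant's Variant value to the matching specialization constant
// type (bool/int/float); other Variant types leave the entry at its default.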
static Vector<RenderingDevice::PipelineSpecializationConstant> _get_spec_constants(const TypedArray<RDPipelineSpecializationConstant> &p_constants) {
Vector<RenderingDevice::PipelineSpecializationConstant> ret;
ret.resize(p_constants.size());
for (int i = 0; i < p_constants.size(); i++) {
Ref<RDPipelineSpecializationConstant> c = p_constants[i];
ERR_CONTINUE(c.is_null());
RenderingDevice::PipelineSpecializationConstant &sc = ret.write[i];
Variant value = c->get_value();
switch (value.get_type()) {
case Variant::BOOL: {
sc.type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
sc.bool_value = value;
} break;
case Variant::INT: {
sc.type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
sc.int_value = value;
} break;
case Variant::FLOAT: {
sc.type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
sc.float_value = value;
} break;
default: {
}
}

sc.constant_id = c->get_constant_id();
}
return ret;
}

RID RenderingDevice::_render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const Ref<RDPipelineRasterizationState> &p_rasterization_state, const Ref<RDPipelineMultisampleState> &p_multisample_state, const Ref<RDPipelineDepthStencilState> &p_depth_stencil_state, const Ref<RDPipelineColorBlendState> &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants) {
PipelineRasterizationState rasterization_state;
if (p_rasterization_state.is_valid()) {
rasterization_state = p_rasterization_state->base;
}

PipelineMultisampleState multisample_state;
if (p_multisample_state.is_valid()) {
multisample_state = p_multisample_state->base;
for (int i = 0; i < p_multisample_state->sample_masks.size(); i++) {
int64_t mask = p_multisample_state->sample_masks[i];
multisample_state.sample_mask.push_back(mask);
}
}

PipelineDepthStencilState depth_stencil_state;
if (p_depth_stencil_state.is_valid()) {
depth_stencil_state = p_depth_stencil_state->base;
}

PipelineColorBlendState color_blend_state;
if (p_blend_state.is_valid()) {
color_blend_state = p_blend_state->base;
for (int i = 0; i < p_blend_state->attachments.size(); i++) {
Ref<RDPipelineColorBlendStateAttachment> attachment = p_blend_state->attachments[i];
if (attachment.is_valid()) {
color_blend_state.attachments.push_back(attachment->base);
}
}
}

return render_pipeline_create(p_shader, p_framebuffer_format, p_vertex_format, p_render_primitive, rasterization_state, multisample_state, depth_stencil_state, color_blend_state, p_dynamic_state_flags, p_for_render_pass, _get_spec_constants(p_specialization_constants));
}

RID RenderingDevice::_compute_pipeline_create(RID p_shader, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants = TypedArray<RDPipelineSpecializationConstant>()) {
return compute_pipeline_create(p_shader, _get_spec_constants(p_specialization_constants));
}

#ifndef DISABLE_DEPRECATED
Vector<int64_t> RenderingDevice::_draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const TypedArray<RID> &p_storage_textures) {
ERR_FAIL_V_MSG(Vector<int64_t>(), "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}

Vector<int64_t> RenderingDevice::_draw_list_switch_to_next_pass_split(uint32_t p_splits) {
ERR_FAIL_V_MSG(Vector<int64_t>(), "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
#endif

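// The size check below prevents reading past the end of the supplied buffer.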
void RenderingDevice::_draw_list_set_push_constant(DrawListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size) {
ERR_FAIL_COND(p_data_size > (uint32_t)p_data.size());
draw_list_set_push_constant(p_list, p_data.ptr(), p_data_size);
}

void RenderingDevice::_compute_list_set_push_constant(ComputeListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size) {
ERR_FAIL_COND(p_data_size > (uint32_t)p_data.size());
compute_list_set_push_constant(p_list, p_data.ptr(), p_data_size);
}

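// Keep the public callback resource-usage enum in lockstep with the internal
// render graph (RDG) enum so values can be converted by a plain cast.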
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_NONE, RDG::RESOURCE_USAGE_NONE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_COPY_FROM, RDG::RESOURCE_USAGE_COPY_FROM));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_COPY_TO, RDG::RESOURCE_USAGE_COPY_TO));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_RESOLVE_FROM, RDG::RESOURCE_USAGE_RESOLVE_FROM));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_RESOLVE_TO, RDG::RESOURCE_USAGE_RESOLVE_TO));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_UNIFORM_BUFFER_READ, RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_INDIRECT_BUFFER_READ, RDG::RESOURCE_USAGE_INDIRECT_BUFFER_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_TEXTURE_BUFFER_READ, RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE, RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_STORAGE_BUFFER_READ, RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE, RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_VERTEX_BUFFER_READ, RDG::RESOURCE_USAGE_VERTEX_BUFFER_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_INDEX_BUFFER_READ, RDG::RESOURCE_USAGE_INDEX_BUFFER_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_TEXTURE_SAMPLE, RDG::RESOURCE_USAGE_TEXTURE_SAMPLE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_STORAGE_IMAGE_READ, RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE, RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE, RDG::RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE, RDG::RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_GENERAL, RDG::RESOURCE_USAGE_GENERAL));
static_assert(ENUM_MEMBERS_EQUAL(RD::CALLBACK_RESOURCE_USAGE_MAX, RDG::RESOURCE_USAGE_MAX));