Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/servers/rendering/rendering_device.h
21212 views
1
/**************************************************************************/
2
/* rendering_device.h */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#pragma once
32
33
#include "core/object/worker_thread_pool.h"
34
#include "core/os/condition_variable.h"
35
#include "core/os/thread_safe.h"
36
#include "core/templates/local_vector.h"
37
#include "core/templates/rb_map.h"
38
#include "core/templates/rid_owner.h"
39
#include "core/variant/typed_array.h"
40
#include "servers/display/display_server.h"
41
#include "servers/rendering/rendering_device_commons.h"
42
#include "servers/rendering/rendering_device_driver.h"
43
#include "servers/rendering/rendering_device_graph.h"
44
45
class RDTextureFormat;
46
class RDTextureView;
47
class RDAttachmentFormat;
48
class RDSamplerState;
49
class RDVertexAttribute;
50
class RDShaderSource;
51
class RDShaderSPIRV;
52
class RDUniform;
53
class RDPipelineRasterizationState;
54
class RDPipelineMultisampleState;
55
class RDPipelineDepthStencilState;
56
class RDPipelineColorBlendState;
57
class RDFramebufferPass;
58
class RDPipelineSpecializationConstant;
59
60
class RenderingDevice : public RenderingDeviceCommons {
61
GDCLASS(RenderingDevice, Object)
62
63
_THREAD_SAFE_CLASS_
64
65
private:
66
Thread::ID render_thread_id;
67
68
public:
69
typedef int64_t DrawListID;
70
typedef int64_t ComputeListID;
71
typedef int64_t RaytracingListID;
72
73
typedef void (*InvalidationCallback)(void *);
74
75
private:
76
static RenderingDevice *singleton;
77
78
RenderingContextDriver *context = nullptr;
79
RenderingDeviceDriver *driver = nullptr;
80
RenderingContextDriver::Device device;
81
82
bool local_device_processing = false;
83
bool is_main_instance = false;
84
85
protected:
86
static void _bind_methods();
87
88
#ifndef DISABLE_DEPRECATED
89
RID _shader_create_from_bytecode_bind_compat_79606(const Vector<uint8_t> &p_shader_binary);
90
RID _texture_create_from_extension_compat_105570(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_usage, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers);
91
static void _bind_compatibility_methods();
92
#endif
93
94
/***************************/
95
/**** ID INFRASTRUCTURE ****/
96
/***************************/
97
public:
98
//base numeric ID for all types
99
enum {
100
INVALID_FORMAT_ID = -1
101
};
102
103
enum IDType {
104
ID_TYPE_FRAMEBUFFER_FORMAT,
105
ID_TYPE_VERTEX_FORMAT,
106
ID_TYPE_DRAW_LIST,
107
ID_TYPE_COMPUTE_LIST = 4,
108
ID_TYPE_RAYTRACING_LIST = 5,
109
ID_TYPE_MAX,
110
ID_BASE_SHIFT = 58, // 5 bits for ID types.
111
ID_MASK = (ID_BASE_SHIFT - 1),
112
};
113
114
private:
115
HashMap<RID, HashSet<RID>> dependency_map; // IDs to IDs that depend on it.
116
HashMap<RID, HashSet<RID>> reverse_dependency_map; // Same as above, but in reverse.
117
118
void _add_dependency(RID p_id, RID p_depends_on);
119
void _free_dependencies(RID p_id);
120
121
private:
122
/***************************/
123
/**** BUFFER MANAGEMENT ****/
124
/***************************/
125
126
// These are temporary buffers on CPU memory that hold
127
// the information until the CPU fetches it and places it
128
// either on GPU buffers, or images (textures). It ensures
129
// updates are properly synchronized with whatever the
130
// GPU is doing.
131
//
132
// The logic here is as follows, only 3 of these
133
// blocks are created at the beginning (one per frame)
134
// they can each belong to a frame (assigned to current when
135
// used) and they can only be reused after the same frame is
136
// recycled.
137
//
138
// When CPU requires to allocate more than what is available,
139
// more of these buffers are created. If a limit is reached,
140
// then a fence will ensure will wait for blocks allocated
141
// in previous frames are processed. If that fails, then
142
// another fence will ensure everything pending for the current
143
// frame is processed (effectively stalling).
144
//
145
// See the comments in the code to understand better how it works.
146
147
enum StagingRequiredAction {
148
STAGING_REQUIRED_ACTION_NONE,
149
STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL,
150
STAGING_REQUIRED_ACTION_STALL_PREVIOUS,
151
};
152
153
struct StagingBufferBlock {
	// One CPU-visible staging block; see the explanation above about how blocks
	// are assigned to frames and recycled.
	RDD::BufferID driver_id; // Driver handle of the staging buffer.
	uint64_t frame_used = 0; // Frame that last used this block; it can be reused once that frame is recycled.
	uint32_t fill_amount = 0; // Bytes already consumed from this block.
	uint8_t *data_ptr = nullptr; // CPU pointer into the block's memory (mapping is handled in the .cpp).
};
159
160
struct StagingBuffers {
	// Pool of staging blocks, one pool for uploads and one for downloads
	// (see upload_staging_buffers / download_staging_buffers below).
	Vector<StagingBufferBlock> blocks; // Grows on demand, never shrinks.
	int current = 0; // Index of the block currently being filled.
	uint32_t block_size = 0; // Size in bytes of each block.
	uint64_t max_size = 0; // Budget before allocation must stall (see StagingRequiredAction).
	BitField<RDD::BufferUsageBits> usage_bits = {}; // Usage flags for every block's buffer.
	bool used = false; // Set once the pool has been used; reset semantics live in the .cpp.
};
168
169
Error _staging_buffer_allocate(StagingBuffers &p_staging_buffers, uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, StagingRequiredAction &r_required_action, bool p_can_segment = true);
170
void _staging_buffer_execute_required_action(StagingBuffers &p_staging_buffers, StagingRequiredAction p_required_action);
171
Error _insert_staging_block(StagingBuffers &p_staging_buffers);
172
173
StagingBuffers upload_staging_buffers;
174
StagingBuffers download_staging_buffers;
175
176
struct Buffer {
	// GPU buffer state shared by uniform/storage/texture/vertex buffer owners.
	RDD::BufferID driver_id; // Driver handle of the GPU buffer.
	uint32_t size = 0; // Size in bytes.
	BitField<RDD::BufferUsageBits> usage = {}; // Usage flags the buffer was created with.
	RDG::ResourceTracker *draw_tracker = nullptr; // Render-graph tracker used to order accesses.
	int32_t transfer_worker_index = -1; // Transfer worker with pending operations on this buffer (-1 = none).
	uint64_t transfer_worker_operation = 0; // Operation counter on that worker (used to wait for pending transfers).
};
184
185
Buffer *_get_buffer_from_owner(RID p_buffer);
186
Error _buffer_initialize(Buffer *p_buffer, Span<uint8_t> p_data, uint32_t p_required_align = 32);
187
188
void update_perf_report();
189
// Flag for batching descriptor sets.
190
bool descriptor_set_batching = true;
191
// When true, the final draw call that copies our offscreen result into the Swapchain is put into its
192
// own cmd buffer, so that the whole rendering can start early instead of having to wait for the
193
// swapchain semaphore to be signaled (which causes bubbles).
194
bool split_swapchain_into_its_own_cmd_buffer = true;
195
uint32_t gpu_copy_count = 0;
196
uint32_t direct_copy_count = 0;
197
uint32_t copy_bytes_count = 0;
198
uint32_t prev_gpu_copy_count = 0;
199
uint32_t prev_copy_bytes_count = 0;
200
201
RID_Owner<Buffer, true> uniform_buffer_owner;
202
RID_Owner<Buffer, true> storage_buffer_owner;
203
RID_Owner<Buffer, true> texture_buffer_owner;
204
205
struct BufferGetDataRequest {
	// Pending asynchronous readback issued by buffer_get_data_async().
	uint32_t frame_local_index = 0; // Presumably the first region index in the frame's download list — confirm against the .cpp.
	uint32_t frame_local_count = 0; // Number of regions belonging to this request.
	Callable callback; // Invoked with the downloaded bytes once the copy completes.
	uint32_t size = 0; // Total requested size in bytes.
};
211
212
public:
213
Error buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size);
214
/**
215
* @brief Updates the given GPU buffer at offset and size with the given CPU data.
216
* @remarks
217
* Buffer update is queued into the render graph. The render graph will reorder this operation so
218
* that it happens together with other buffer_update() in bulk and before rendering operations
219
* (or compute dispatches) that need it.
220
*
221
* This means that the following will not work as intended:
222
* @code
223
* buffer_update(buffer_a, ..., data_source_x, ...);
224
* draw_list_draw(buffer_a); // render data_source_x.
225
* buffer_update(buffer_a, ..., data_source_y, ...);
226
* draw_list_draw(buffer_a); // render data_source_y.
227
* @endcode
228
*
229
* Because it will be *reordered* to become the following:
230
* @code
231
* buffer_update(buffer_a, ..., data_source_x, ...);
232
* buffer_update(buffer_a, ..., data_source_y, ...);
233
* draw_list_draw(buffer_a); // render data_source_y. <-- Oops! should be data_source_x
234
* draw_list_draw(buffer_a); // render data_source_y.
235
* @endcode
236
*
237
* When p_skip_check = false, we will perform checks to prevent this situation from happening
* (buffer_update must not be called while creating a draw or compute list).
* Do NOT set it to true for user-facing public API because users had trouble understanding
* this problem when manually creating draw lists.
241
*
242
* Godot internally can set p_skip_check = true when it believes it will only update
243
* the buffer once and it needs to be done while a draw/compute list is being created.
244
*
245
* Important: The Vulkan & Metal APIs do not allow issuing copies while inside a RenderPass.
246
* We can do it because Godot's render graph will reorder them.
247
*
248
* @param p_buffer GPU buffer to update.
249
* @param p_offset Offset in bytes (relative to p_buffer).
250
* @param p_size Size in bytes of the data.
251
* @param p_data CPU data to transfer to GPU.
252
* Pointer can be deleted after buffer_update returns.
253
* @param p_skip_check Must always be false for user-facing public API. See remarks.
254
* @return Status result of the operation.
255
*/
256
Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, bool p_skip_check = false);
257
Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size);
258
Vector<uint8_t> buffer_get_data(RID p_buffer, uint32_t p_offset = 0, uint32_t p_size = 0); // This causes stall, only use to retrieve large buffers for saving.
259
Error buffer_get_data_async(RID p_buffer, const Callable &p_callback, uint32_t p_offset = 0, uint32_t p_size = 0);
260
uint64_t buffer_get_device_address(RID p_buffer);
261
uint8_t *buffer_persistent_map_advance(RID p_buffer);
262
void buffer_flush(RID p_buffer);
263
264
private:
265
/******************/
266
/**** CALLBACK ****/
267
/******************/
268
269
public:
270
enum CallbackResourceType {
271
CALLBACK_RESOURCE_TYPE_TEXTURE,
272
CALLBACK_RESOURCE_TYPE_BUFFER,
273
};
274
275
enum CallbackResourceUsage {
276
CALLBACK_RESOURCE_USAGE_NONE,
277
CALLBACK_RESOURCE_USAGE_COPY_FROM,
278
CALLBACK_RESOURCE_USAGE_COPY_TO,
279
CALLBACK_RESOURCE_USAGE_RESOLVE_FROM,
280
CALLBACK_RESOURCE_USAGE_RESOLVE_TO,
281
CALLBACK_RESOURCE_USAGE_UNIFORM_BUFFER_READ,
282
CALLBACK_RESOURCE_USAGE_INDIRECT_BUFFER_READ,
283
CALLBACK_RESOURCE_USAGE_TEXTURE_BUFFER_READ,
284
CALLBACK_RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE,
285
CALLBACK_RESOURCE_USAGE_STORAGE_BUFFER_READ,
286
CALLBACK_RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE,
287
CALLBACK_RESOURCE_USAGE_VERTEX_BUFFER_READ,
288
CALLBACK_RESOURCE_USAGE_INDEX_BUFFER_READ,
289
CALLBACK_RESOURCE_USAGE_TEXTURE_SAMPLE,
290
CALLBACK_RESOURCE_USAGE_STORAGE_IMAGE_READ,
291
CALLBACK_RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE,
292
CALLBACK_RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE,
293
CALLBACK_RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE,
294
CALLBACK_RESOURCE_USAGE_ATTACHMENT_FRAGMENT_SHADING_RATE_READ,
295
CALLBACK_RESOURCE_USAGE_ATTACHMENT_FRAGMENT_DENSITY_MAP_READ,
296
CALLBACK_RESOURCE_USAGE_GENERAL,
297
CALLBACK_RESOURCE_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT,
298
CALLBACK_RESOURCE_USAGE_ACCELERATION_STRUCTURE_READ,
299
CALLBACK_RESOURCE_USAGE_ACCELERATION_STRUCTURE_READ_WRITE,
300
CALLBACK_RESOURCE_USAGE_MAX
301
};
302
303
struct CallbackResource {
	// Describes one resource a driver callback will touch, so the render graph
	// can synchronize around the callback (see driver_callback_add()).
	RID rid; // The resource being accessed.
	CallbackResourceType type = CALLBACK_RESOURCE_TYPE_TEXTURE; // Whether `rid` is a texture or a buffer.
	CallbackResourceUsage usage = CALLBACK_RESOURCE_USAGE_NONE; // How the callback accesses the resource.
};
308
309
Error driver_callback_add(RDD::DriverCallback p_callback, void *p_userdata, VectorView<CallbackResource> p_resources);
310
311
/*****************/
312
/**** TEXTURE ****/
313
/*****************/
314
315
// In modern APIs, the concept of textures may not exist;
316
// instead there is the image (the memory pretty much,
317
// the view (how the memory is interpreted) and the
318
// sampler (how it's sampled from the shader).
319
//
320
// Texture here includes the first two stages, but
321
// It's possible to create textures sharing the image
322
// but with different views. The main use case for this
323
// is textures that can be read as both SRGB/Linear,
324
// or slices of a texture (a mipmap, a layer, a 3D slice)
325
// for a framebuffer to render into it.
326
327
struct Texture {
	// Internal state for a texture (image + view, per the comment above).
	// Resources used when the texture must be reinterpreted through an
	// incompatible view/format and the driver can't share the image directly.
	struct SharedFallback {
		uint32_t revision = 1; // Bumped when the fallback copy goes stale.
		RDD::TextureID texture; // Fallback image with the alternate format.
		RDG::ResourceTracker *texture_tracker = nullptr;
		RDD::BufferID buffer; // Intermediate buffer for raw reinterpretation copies.
		RDG::ResourceTracker *buffer_tracker = nullptr;
		bool raw_reinterpretation = false; // Copy through the buffer instead of image-to-image.
	};

	RDD::TextureID driver_id; // Driver handle of the image.

	TextureType type = TEXTURE_TYPE_MAX;
	DataFormat format = DATA_FORMAT_MAX;
	TextureSamples samples = TEXTURE_SAMPLES_MAX;
	TextureSliceType slice_type = TEXTURE_SLICE_MAX; // TEXTURE_SLICE_MAX means this is not a slice view.
	Rect2i slice_rect; // Mipmap/layer region covered when this is a slice of another texture.
	uint32_t width = 0;
	uint32_t height = 0;
	uint32_t depth = 0;
	uint32_t layers = 0;
	uint32_t mipmaps = 0;
	uint32_t usage_flags = 0;
	uint32_t base_mipmap = 0; // First mipmap relative to the owner (non-zero for slices).
	uint32_t base_layer = 0; // First layer relative to the owner (non-zero for slices).

	Vector<DataFormat> allowed_shared_formats; // Formats texture_create_shared() may reinterpret this as.

	bool is_resolve_buffer = false;
	bool is_discardable = false;
	bool has_initial_data = false; // Created with initial data (affects first-use layout handling).
	bool pending_clear = false; // Needs a clear before first real use (see _texture_check_pending_clear()).

	BitField<RDD::TextureAspectBits> read_aspect_flags = {};
	BitField<RDD::TextureAspectBits> barrier_aspect_flags = {};
	bool bound = false; // Bound to framebuffer.
	RID owner; // Owning texture when this is a shared/slice view; null RID otherwise.

	RDG::ResourceTracker *draw_tracker = nullptr; // Render-graph tracker for the whole texture.
	HashMap<Rect2i, RDG::ResourceTracker *> *slice_trackers = nullptr; // Per-slice trackers, lazily allocated.
	SharedFallback *shared_fallback = nullptr; // Only allocated when a fallback is needed.
	int32_t transfer_worker_index = -1; // Transfer worker with pending operations (-1 = none).
	uint64_t transfer_worker_operation = 0; // Operation counter on that worker.

	// Subresource range covering everything this texture (or slice) owns,
	// for use in barriers.
	RDD::TextureSubresourceRange barrier_range() const {
		RDD::TextureSubresourceRange r;
		r.aspect = barrier_aspect_flags;
		r.base_mipmap = base_mipmap;
		r.mipmap_count = mipmaps;
		r.base_layer = base_layer;
		r.layer_count = layers;
		return r;
	}

	// Reconstructs the public TextureFormat this texture was created with.
	TextureFormat texture_format() const {
		TextureFormat tf;
		tf.format = format;
		tf.width = width;
		tf.height = height;
		tf.depth = depth;
		tf.array_layers = layers;
		tf.mipmaps = mipmaps;
		tf.texture_type = type;
		tf.samples = samples;
		tf.usage_bits = usage_flags;
		tf.shareable_formats = allowed_shared_formats;
		tf.is_resolve_buffer = is_resolve_buffer;
		tf.is_discardable = is_discardable;
		return tf;
	}
};
398
399
RID_Owner<Texture, true> texture_owner;
400
uint32_t texture_upload_region_size_px = 0;
401
uint32_t texture_download_region_size_px = 0;
402
403
uint32_t _texture_layer_count(Texture *p_texture) const;
404
uint32_t _texture_alignment(Texture *p_texture) const;
405
Error _texture_initialize(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, RDD::TextureLayout p_dst_layout, bool p_immediate_flush);
406
void _texture_check_shared_fallback(Texture *p_texture);
407
void _texture_update_shared_fallback(RID p_texture_rid, Texture *p_texture, bool p_for_writing);
408
void _texture_free_shared_fallback(Texture *p_texture);
409
void _texture_copy_shared(RID p_src_texture_rid, Texture *p_src_texture, RID p_dst_texture_rid, Texture *p_dst_texture);
410
void _texture_create_reinterpret_buffer(Texture *p_texture);
411
void _texture_check_pending_clear(RID p_texture_rid, Texture *p_texture);
412
void _texture_clear_color(RID p_texture_rid, Texture *p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers);
413
void _texture_clear_depth_stencil(RID p_texture_rid, Texture *p_texture, float p_depth, uint8_t p_stencil, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers);
414
uint32_t _texture_vrs_method_to_usage_bits() const;
415
416
struct TextureGetDataRequest {
	// Pending asynchronous readback issued by texture_get_data_async().
	uint32_t frame_local_index = 0; // Presumably the first region index in the frame's download list — confirm against the .cpp.
	uint32_t frame_local_count = 0; // Number of regions belonging to this request.
	Callable callback; // Invoked with the downloaded image data once the copy completes.
	uint32_t width = 0; // Dimensions/format captured at request time so the
	uint32_t height = 0; // callback can decode the raw bytes.
	uint32_t depth = 0;
	uint32_t mipmaps = 0;
	RDD::DataFormat format = RDD::DATA_FORMAT_MAX;
};
426
427
public:
428
struct TextureView {
429
DataFormat format_override = DATA_FORMAT_MAX; // // Means, use same as format.
430
TextureSwizzle swizzle_r = TEXTURE_SWIZZLE_R;
431
TextureSwizzle swizzle_g = TEXTURE_SWIZZLE_G;
432
TextureSwizzle swizzle_b = TEXTURE_SWIZZLE_B;
433
TextureSwizzle swizzle_a = TEXTURE_SWIZZLE_A;
434
435
bool operator==(const TextureView &p_other) const {
436
if (format_override != p_other.format_override) {
437
return false;
438
} else if (swizzle_r != p_other.swizzle_r) {
439
return false;
440
} else if (swizzle_g != p_other.swizzle_g) {
441
return false;
442
} else if (swizzle_b != p_other.swizzle_b) {
443
return false;
444
} else if (swizzle_a != p_other.swizzle_a) {
445
return false;
446
} else {
447
return true;
448
}
449
}
450
};
451
452
RID texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t>> &p_data = Vector<Vector<uint8_t>>());
453
RID texture_create_shared(const TextureView &p_view, RID p_with_texture);
454
RID texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_usage, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers, uint64_t p_mipmaps = 1);
455
RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D, uint32_t p_layers = 0);
456
Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data);
457
Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer); // CPU textures will return immediately, while GPU textures will most likely force a flush
458
Error texture_get_data_async(RID p_texture, uint32_t p_layer, const Callable &p_callback);
459
460
bool texture_is_format_supported_for_usage(DataFormat p_format, BitField<TextureUsageBits> p_usage) const;
461
bool texture_is_shared(RID p_texture);
462
bool texture_is_valid(RID p_texture);
463
TextureFormat texture_get_format(RID p_texture);
464
Size2i texture_size(RID p_texture);
465
#ifndef DISABLE_DEPRECATED
466
uint64_t texture_get_native_handle(RID p_texture);
467
#endif
468
469
Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer);
470
Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers);
471
Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture);
472
473
void texture_set_discardable(RID p_texture, bool p_discardable);
474
bool texture_is_discardable(RID p_texture);
475
476
public:
477
/*************/
478
/**** VRS ****/
479
/*************/
480
481
enum VRSMethod {
482
VRS_METHOD_NONE,
483
VRS_METHOD_FRAGMENT_SHADING_RATE,
484
VRS_METHOD_FRAGMENT_DENSITY_MAP,
485
};
486
487
private:
488
VRSMethod vrs_method = VRS_METHOD_NONE;
489
DataFormat vrs_format = DATA_FORMAT_MAX;
490
Size2i vrs_texel_size;
491
492
static RDG::ResourceUsage _vrs_usage_from_method(VRSMethod p_method);
493
static RDD::PipelineStageBits _vrs_stages_from_method(VRSMethod p_method);
494
static RDD::TextureLayout _vrs_layout_from_method(VRSMethod p_method);
495
void _vrs_detect_method();
496
497
public:
498
VRSMethod vrs_get_method() const;
499
DataFormat vrs_get_format() const;
500
Size2i vrs_get_texel_size() const;
501
502
/*********************/
503
/**** FRAMEBUFFER ****/
504
/*********************/
505
506
// In modern APIs, generally, framebuffers work similar to how they
507
// do in OpenGL, with the exception that
508
// the "format" (RDD::RenderPassID) is not dynamic
509
// and must be more or less the same as the one
510
// used for the render pipelines.
511
512
struct AttachmentFormat {
513
enum : uint32_t {
514
UNUSED_ATTACHMENT = 0xFFFFFFFF
515
};
516
DataFormat format;
517
TextureSamples samples;
518
uint32_t usage_flags;
519
AttachmentFormat() {
520
format = DATA_FORMAT_R8G8B8A8_UNORM;
521
samples = TEXTURE_SAMPLES_1;
522
usage_flags = 0;
523
}
524
};
525
526
struct FramebufferPass {
	// One subpass of a multipass framebuffer. The int32_t values index into
	// the framebuffer format's attachment list; ATTACHMENT_UNUSED marks an
	// unused slot.
	Vector<int32_t> color_attachments;
	Vector<int32_t> input_attachments;
	Vector<int32_t> resolve_attachments;
	Vector<int32_t> preserve_attachments;
	int32_t depth_attachment = ATTACHMENT_UNUSED;
	int32_t depth_resolve_attachment = ATTACHMENT_UNUSED;
};
534
535
typedef int64_t FramebufferFormatID;
536
537
private:
538
struct FramebufferFormatKey {
539
Vector<AttachmentFormat> attachments;
540
Vector<FramebufferPass> passes;
541
uint32_t view_count = 1;
542
VRSMethod vrs_method = VRS_METHOD_NONE;
543
int32_t vrs_attachment = ATTACHMENT_UNUSED;
544
Size2i vrs_texel_size;
545
546
bool operator<(const FramebufferFormatKey &p_key) const {
547
if (vrs_texel_size != p_key.vrs_texel_size) {
548
return vrs_texel_size < p_key.vrs_texel_size;
549
}
550
551
if (vrs_attachment != p_key.vrs_attachment) {
552
return vrs_attachment < p_key.vrs_attachment;
553
}
554
555
if (vrs_method != p_key.vrs_method) {
556
return vrs_method < p_key.vrs_method;
557
}
558
559
if (view_count != p_key.view_count) {
560
return view_count < p_key.view_count;
561
}
562
563
uint32_t pass_size = passes.size();
564
uint32_t key_pass_size = p_key.passes.size();
565
if (pass_size != key_pass_size) {
566
return pass_size < key_pass_size;
567
}
568
const FramebufferPass *pass_ptr = passes.ptr();
569
const FramebufferPass *key_pass_ptr = p_key.passes.ptr();
570
571
for (uint32_t i = 0; i < pass_size; i++) {
572
{ // Compare color attachments.
573
uint32_t attachment_size = pass_ptr[i].color_attachments.size();
574
uint32_t key_attachment_size = key_pass_ptr[i].color_attachments.size();
575
if (attachment_size != key_attachment_size) {
576
return attachment_size < key_attachment_size;
577
}
578
const int32_t *pass_attachment_ptr = pass_ptr[i].color_attachments.ptr();
579
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].color_attachments.ptr();
580
581
for (uint32_t j = 0; j < attachment_size; j++) {
582
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
583
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
584
}
585
}
586
}
587
{ // Compare input attachments.
588
uint32_t attachment_size = pass_ptr[i].input_attachments.size();
589
uint32_t key_attachment_size = key_pass_ptr[i].input_attachments.size();
590
if (attachment_size != key_attachment_size) {
591
return attachment_size < key_attachment_size;
592
}
593
const int32_t *pass_attachment_ptr = pass_ptr[i].input_attachments.ptr();
594
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].input_attachments.ptr();
595
596
for (uint32_t j = 0; j < attachment_size; j++) {
597
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
598
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
599
}
600
}
601
}
602
{ // Compare resolve attachments.
603
uint32_t attachment_size = pass_ptr[i].resolve_attachments.size();
604
uint32_t key_attachment_size = key_pass_ptr[i].resolve_attachments.size();
605
if (attachment_size != key_attachment_size) {
606
return attachment_size < key_attachment_size;
607
}
608
const int32_t *pass_attachment_ptr = pass_ptr[i].resolve_attachments.ptr();
609
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].resolve_attachments.ptr();
610
611
for (uint32_t j = 0; j < attachment_size; j++) {
612
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
613
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
614
}
615
}
616
}
617
{ // Compare preserve attachments.
618
uint32_t attachment_size = pass_ptr[i].preserve_attachments.size();
619
uint32_t key_attachment_size = key_pass_ptr[i].preserve_attachments.size();
620
if (attachment_size != key_attachment_size) {
621
return attachment_size < key_attachment_size;
622
}
623
const int32_t *pass_attachment_ptr = pass_ptr[i].preserve_attachments.ptr();
624
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].preserve_attachments.ptr();
625
626
for (uint32_t j = 0; j < attachment_size; j++) {
627
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
628
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
629
}
630
}
631
}
632
if (pass_ptr[i].depth_attachment != key_pass_ptr[i].depth_attachment) {
633
return pass_ptr[i].depth_attachment < key_pass_ptr[i].depth_attachment;
634
}
635
}
636
637
int as = attachments.size();
638
int bs = p_key.attachments.size();
639
if (as != bs) {
640
return as < bs;
641
}
642
643
const AttachmentFormat *af_a = attachments.ptr();
644
const AttachmentFormat *af_b = p_key.attachments.ptr();
645
for (int i = 0; i < as; i++) {
646
const AttachmentFormat &a = af_a[i];
647
const AttachmentFormat &b = af_b[i];
648
if (a.format != b.format) {
649
return a.format < b.format;
650
}
651
if (a.samples != b.samples) {
652
return a.samples < b.samples;
653
}
654
if (a.usage_flags != b.usage_flags) {
655
return a.usage_flags < b.usage_flags;
656
}
657
}
658
659
return false; // Equal.
660
}
661
};
662
663
static RDD::RenderPassID _render_pass_create(RenderingDeviceDriver *p_driver, const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, uint32_t p_view_count = 1, VRSMethod p_vrs_method = VRS_METHOD_NONE, int32_t p_vrs_attachment = -1, Size2i p_vrs_texel_size = Size2i(), Vector<TextureSamples> *r_samples = nullptr);
664
static RDD::RenderPassID _render_pass_create_from_graph(RenderingDeviceDriver *p_driver, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, void *p_user_data);
665
666
// This is a cache and it's never freed, it ensures
667
// IDs for a given format are always unique.
668
RBMap<FramebufferFormatKey, FramebufferFormatID> framebuffer_format_cache;
669
struct FramebufferFormat {
	// Cached data for one FramebufferFormatID.
	const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E; // Cache entry this format was created from.
	RDD::RenderPassID render_pass; // Here for constructing shaders, never used, see section (7.2. Render Pass Compatibility from Vulkan spec).
	Vector<TextureSamples> pass_samples; // Sample count per pass.
	uint32_t view_count = 1; // Number of views.
};
675
676
HashMap<FramebufferFormatID, FramebufferFormat> framebuffer_formats;
677
678
struct Framebuffer {
679
FramebufferFormatID format_id;
680
uint32_t storage_mask = 0;
681
Vector<RID> texture_ids;
682
InvalidationCallback invalidated_callback = nullptr;
683
void *invalidated_callback_userdata = nullptr;
684
RDG::FramebufferCache *framebuffer_cache = nullptr;
685
Size2 size;
686
uint32_t view_count;
687
};
688
689
RID_Owner<Framebuffer, true> framebuffer_owner;
690
691
public:
692
// This ID is guaranteed to be unique for the same formats, and does not need to be freed.
693
FramebufferFormatID framebuffer_format_create(const Vector<AttachmentFormat> &p_format, uint32_t p_view_count = 1, int32_t p_vrs_attachment = -1);
694
FramebufferFormatID framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count = 1, int32_t p_vrs_attachment = -1);
695
FramebufferFormatID framebuffer_format_create_empty(TextureSamples p_samples = TEXTURE_SAMPLES_1);
696
TextureSamples framebuffer_format_get_texture_samples(FramebufferFormatID p_format, uint32_t p_pass = 0);
697
698
RID framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
699
RID framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
700
RID framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples = TEXTURE_SAMPLES_1, FramebufferFormatID p_format_check = INVALID_ID);
701
bool framebuffer_is_valid(RID p_framebuffer) const;
702
void framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata);
703
704
FramebufferFormatID framebuffer_get_format(RID p_framebuffer);
705
Size2 framebuffer_get_size(RID p_framebuffer);
706
707
/*****************/
708
/**** SAMPLER ****/
709
/*****************/
710
private:
711
RID_Owner<RDD::SamplerID, true> sampler_owner;
712
713
public:
714
RID sampler_create(const SamplerState &p_state);
715
bool sampler_is_format_supported_for_filter(DataFormat p_format, SamplerFilter p_sampler_filter) const;
716
717
/**********************/
718
/**** VERTEX ARRAY ****/
719
/**********************/
720
721
typedef int64_t VertexFormatID;
722
723
private:
724
// Vertex buffers in Vulkan are similar to how
725
// they work in OpenGL, except that instead of
726
// an attribute index, there is a buffer binding
727
// index (for binding the buffers in real-time)
728
// and a location index (what is used in the shader).
729
//
730
// This mapping is done here internally, and it's not
731
// exposed.
732
733
RID_Owner<Buffer, true> vertex_buffer_owner;
734
735
struct VertexDescriptionKey {
736
Vector<VertexAttribute> vertex_formats;
737
738
bool operator==(const VertexDescriptionKey &p_key) const {
739
int vdc = vertex_formats.size();
740
int vdck = p_key.vertex_formats.size();
741
742
if (vdc != vdck) {
743
return false;
744
} else {
745
const VertexAttribute *a_ptr = vertex_formats.ptr();
746
const VertexAttribute *b_ptr = p_key.vertex_formats.ptr();
747
for (int i = 0; i < vdc; i++) {
748
const VertexAttribute &a = a_ptr[i];
749
const VertexAttribute &b = b_ptr[i];
750
751
if (a.location != b.location) {
752
return false;
753
}
754
if (a.offset != b.offset) {
755
return false;
756
}
757
if (a.format != b.format) {
758
return false;
759
}
760
if (a.stride != b.stride) {
761
return false;
762
}
763
if (a.frequency != b.frequency) {
764
return false;
765
}
766
}
767
return true; // They are equal.
768
}
769
}
770
771
uint32_t hash() const {
772
int vdc = vertex_formats.size();
773
uint32_t h = hash_murmur3_one_32(vdc);
774
const VertexAttribute *ptr = vertex_formats.ptr();
775
for (int i = 0; i < vdc; i++) {
776
const VertexAttribute &vd = ptr[i];
777
h = hash_murmur3_one_32(vd.location, h);
778
h = hash_murmur3_one_32(vd.offset, h);
779
h = hash_murmur3_one_32(vd.format, h);
780
h = hash_murmur3_one_32(vd.stride, h);
781
h = hash_murmur3_one_32(vd.frequency, h);
782
}
783
return hash_fmix32(h);
784
}
785
};
786
787
struct VertexDescriptionHash {
	// Hasher adaptor so VertexDescriptionKey can be used in HashMap below.
	static _FORCE_INLINE_ uint32_t hash(const VertexDescriptionKey &p_key) {
		return p_key.hash();
	}
};
792
793
// This is a cache and it's never freed; it ensures that the
// ID used for a specific format always remains the same.
795
HashMap<VertexDescriptionKey, VertexFormatID, VertexDescriptionHash> vertex_format_cache;
796
797
// Cached data for one VertexFormatID: the attribute list it was created
// from, the derived bindings map, and the driver-side format handle.
struct VertexDescriptionCache {
	Vector<VertexAttribute> vertex_formats;
	VertexAttributeBindingsMap bindings;
	RDD::VertexFormatID driver_id;
};
802
803
HashMap<VertexFormatID, VertexDescriptionCache> vertex_formats;
804
805
// A set of vertex buffers bound together under one vertex format.
struct VertexArray {
	RID buffer;
	VertexFormatID description; // Format this array was created against.
	int vertex_count = 0;
	uint32_t max_instances_allowed = 0;

	Vector<RDD::BufferID> buffers; // Not owned, just referenced.
	Vector<RDG::ResourceTracker *> draw_trackers; // Not owned, just referenced.
	Vector<uint64_t> offsets; // Per-buffer byte offsets.
	Vector<int32_t> transfer_worker_indices;
	Vector<uint64_t> transfer_worker_operations;
	HashSet<RID> untracked_buffers; // Source buffers that have no resource tracker.
};
818
819
RID_Owner<VertexArray, true> vertex_array_owner;
820
821
// Buffer specialization carrying index-specific metadata.
struct IndexBuffer : public Buffer {
	uint32_t max_index = 0; // Used for validation.
	uint32_t index_count = 0;
	IndexBufferFormat format = INDEX_BUFFER_FORMAT_UINT16;
	bool supports_restart_indices = false; // Whether primitive restart indices may appear in the data.
};
827
828
RID_Owner<IndexBuffer, true> index_buffer_owner;
829
830
// A view (offset + count) into an index buffer; shares the buffer's
// driver resources rather than owning them.
struct IndexArray {
	uint32_t max_index = 0; // Remember the maximum index here too, for validation.
	RDD::BufferID driver_id; // Not owned, inherited from index buffer.
	RDG::ResourceTracker *draw_tracker = nullptr; // Not owned, inherited from index buffer.
	uint32_t offset = 0; // First index of the view.
	uint32_t indices = 0; // Number of indices in the view.
	IndexBufferFormat format = INDEX_BUFFER_FORMAT_UINT16;
	bool supports_restart_indices = false;
	int32_t transfer_worker_index = -1; // -1 means no transfer worker involved.
	uint64_t transfer_worker_operation = 0;
};
841
842
RID_Owner<IndexArray, true> index_array_owner;
843
844
public:
845
// Optional capabilities requested at buffer creation time.
enum BufferCreationBits {
	BUFFER_CREATION_DEVICE_ADDRESS_BIT = (1 << 0),
	BUFFER_CREATION_AS_STORAGE_BIT = (1 << 1),
	BUFFER_CREATION_DYNAMIC_PERSISTENT_BIT = (1 << 2),
	BUFFER_CREATION_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT = (1 << 3),
};
851
852
// Extra usage flags for storage buffers.
enum StorageBufferUsage {
	STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT = (1 << 0),
};
855
856
RID vertex_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data = {}, BitField<BufferCreationBits> p_creation_bits = 0);
857
// Vector-based convenience overload; forwards to the Span-based
// vertex_buffer_create() above.
RID _vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<BufferCreationBits> p_creation_bits = 0) {
	return vertex_buffer_create(p_size_bytes, p_data, p_creation_bits);
}
860
861
// This ID is guaranteed to be unique for the same formats and does not need to be freed.
862
VertexFormatID vertex_format_create(const Vector<VertexAttribute> &p_vertex_descriptions);
863
RID vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets = Vector<uint64_t>());
864
865
RID index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, Span<uint8_t> p_data = {}, bool p_use_restart_indices = false, BitField<BufferCreationBits> p_creation_bits = 0);
866
// Vector-based convenience overload; forwards to the Span-based
// index_buffer_create() above.
RID _index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices = false, BitField<BufferCreationBits> p_creation_bits = 0) {
	return index_buffer_create(p_index_count, p_format, p_data, p_use_restart_indices, p_creation_bits);
}
869
870
RID index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count);
871
872
private:
873
BitField<RDD::BufferUsageBits> _creation_to_usage_bits(BitField<BufferCreationBits> p_creation_bits);
874
875
/****************/
876
/**** SHADER ****/
877
/****************/
878
879
// Some APIs (e.g., Vulkan) specify a really complex behavior for the application
880
// in order to tell when descriptor sets need to be re-bound (or not).
881
// "When binding a descriptor set (see Descriptor Set Binding) to set
882
// number N, if the previously bound descriptor sets for sets zero
883
// through N-1 were all bound using compatible pipeline layouts,
884
// then performing this binding does not disturb any of the lower numbered sets.
885
// If, additionally, the previous bound descriptor set for set N was
886
// bound using a pipeline layout compatible for set N, then the bindings
887
// in sets numbered greater than N are also not disturbed."
888
// As a result, we need to figure out quickly when something is no longer "compatible",
// in order to avoid costly rebinds.
890
891
private:
892
struct UniformSetFormat {
893
Vector<ShaderUniform> uniforms;
894
895
_FORCE_INLINE_ bool operator<(const UniformSetFormat &p_other) const {
896
if (uniforms.size() != p_other.uniforms.size()) {
897
return uniforms.size() < p_other.uniforms.size();
898
}
899
for (int i = 0; i < uniforms.size(); i++) {
900
if (uniforms[i] < p_other.uniforms[i]) {
901
return true;
902
} else if (p_other.uniforms[i] < uniforms[i]) {
903
return false;
904
}
905
}
906
return false;
907
}
908
};
909
910
// Always grows, never shrinks, ensuring unique IDs, but we assume
911
// the amount of formats will never be a problem, as the amount of shaders
912
// in a game is limited.
913
RBMap<UniformSetFormat, uint32_t> uniform_set_format_cache;
914
915
// Shaders in Vulkan are just pretty much
916
// precompiled blocks of SPIR-V bytecode. They
917
// are most likely not really compiled to host
918
// assembly until a pipeline is created.
919
//
920
// When supplying the shaders, this implementation
921
// will use the reflection abilities of glslang to
922
// understand and cache everything required to
923
// create and use the descriptor sets (Vulkan's
924
// biggest pain).
925
//
926
// Additionally, hashes are created for every set
927
// to do quick validation and ensuring the user
928
// does not submit something invalid.
929
930
// Internal shader object: reflection data plus the driver handle and
// cached per-set format IDs used for uniform set validation.
struct Shader : public ShaderReflection {
	String name; // Used for debug.
	RDD::ShaderID driver_id;
	uint32_t layout_hash = 0; // Hash used to detect compatible pipeline layouts.
	BitField<RDD::PipelineStageBits> stage_bits = {};
	Vector<uint32_t> set_formats; // One cached format ID per descriptor set.
};
937
938
String _shader_uniform_debug(RID p_shader, int p_set = -1);
939
940
RID_Owner<Shader, true> shader_owner;
941
942
#ifndef DISABLE_DEPRECATED
943
public:
944
// Deprecated barrier flags, kept only for binding compatibility
// (this block is compiled out when DISABLE_DEPRECATED is defined).
enum BarrierMask {
	BARRIER_MASK_VERTEX = 1,
	BARRIER_MASK_FRAGMENT = 8,
	BARRIER_MASK_COMPUTE = 2,
	BARRIER_MASK_TRANSFER = 4,

	BARRIER_MASK_RASTER = BARRIER_MASK_VERTEX | BARRIER_MASK_FRAGMENT, // 9,
	BARRIER_MASK_ALL_BARRIERS = 0x7FFF, // all flags set
	BARRIER_MASK_NO_BARRIER = 0x8000,
};
954
955
// Deprecated render-pass initial actions. The legacy values are now
// aliases collapsed onto the three surviving actions.
enum InitialAction {
	INITIAL_ACTION_LOAD,
	INITIAL_ACTION_CLEAR,
	INITIAL_ACTION_DISCARD,
	INITIAL_ACTION_MAX,
	INITIAL_ACTION_CLEAR_REGION = INITIAL_ACTION_CLEAR,
	INITIAL_ACTION_CLEAR_REGION_CONTINUE = INITIAL_ACTION_CLEAR,
	INITIAL_ACTION_KEEP = INITIAL_ACTION_LOAD,
	INITIAL_ACTION_DROP = INITIAL_ACTION_DISCARD,
	INITIAL_ACTION_CONTINUE = INITIAL_ACTION_LOAD,
};
966
967
// Deprecated render-pass final actions, with legacy aliases collapsed
// onto the two surviving actions.
enum FinalAction {
	FINAL_ACTION_STORE,
	FINAL_ACTION_DISCARD,
	FINAL_ACTION_MAX,
	FINAL_ACTION_READ = FINAL_ACTION_STORE,
	FINAL_ACTION_CONTINUE = FINAL_ACTION_STORE,
};
974
975
void barrier(BitField<BarrierMask> p_from = BARRIER_MASK_ALL_BARRIERS, BitField<BarrierMask> p_to = BARRIER_MASK_ALL_BARRIERS);
976
void full_barrier();
977
void draw_command_insert_label(String p_label_name, const Color &p_color = Color(1, 1, 1, 1));
978
Error draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2(), const Vector<RID> &p_storage_textures = Vector<RID>());
979
Error draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids);
980
Vector<int64_t> _draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2(), const TypedArray<RID> &p_storage_textures = TypedArray<RID>());
981
Vector<int64_t> _draw_list_switch_to_next_pass_split(uint32_t p_splits);
982
983
private:
984
void _draw_list_end_bind_compat_81356(BitField<BarrierMask> p_post_barrier);
985
void _compute_list_end_bind_compat_81356(BitField<BarrierMask> p_post_barrier);
986
void _barrier_bind_compat_81356(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to);
987
988
void _draw_list_end_bind_compat_84976(BitField<BarrierMask> p_post_barrier);
989
void _compute_list_end_bind_compat_84976(BitField<BarrierMask> p_post_barrier);
990
InitialAction _convert_initial_action_84976(InitialAction p_old_initial_action);
991
FinalAction _convert_final_action_84976(FinalAction p_old_final_action);
992
DrawListID _draw_list_begin_bind_compat_84976(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const TypedArray<RID> &p_storage_textures);
993
ComputeListID _compute_list_begin_bind_compat_84976(bool p_allow_draw_overlap);
994
Error _buffer_update_bind_compat_84976(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier);
995
Error _buffer_clear_bind_compat_84976(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier);
996
Error _texture_update_bind_compat_84976(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier);
997
Error _texture_copy_bind_compat_84976(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier);
998
Error _texture_clear_bind_compat_84976(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier);
999
Error _texture_resolve_multisample_bind_compat_84976(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier);
1000
1001
FramebufferFormatID _screen_get_framebuffer_format_bind_compat_87340() const;
1002
1003
DrawListID _draw_list_begin_bind_compat_90993(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region);
1004
1005
DrawListID _draw_list_begin_bind_compat_98670(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, uint32_t p_breadcrumb);
1006
1007
RID _uniform_buffer_create_bind_compat_101561(uint32_t p_size_bytes, const Vector<uint8_t> &p_data);
1008
RID _vertex_buffer_create_bind_compat_101561(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, bool p_use_as_storage);
1009
RID _index_buffer_create_bind_compat_101561(uint32_t p_size_indices, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices);
1010
RID _storage_buffer_create_bind_compat_101561(uint32_t p_size, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage);
1011
#endif
1012
1013
public:
1014
// Accessors for the underlying driver/context objects. The returned
// pointers/references are not owned by the caller.
RenderingDeviceDriver *get_device_driver() const { return driver; }
RenderingContextDriver *get_context_driver() const { return context; }

const RDD::Capabilities &get_device_capabilities() const { return driver->get_capabilities(); }
1018
1019
bool has_feature(const Features p_feature) const;
1020
1021
Vector<uint8_t> shader_compile_spirv_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language = SHADER_LANGUAGE_GLSL, String *r_error = nullptr, bool p_allow_cache = true);
1022
Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "");
1023
1024
RID shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "");
1025
RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder = RID());
1026
RID shader_create_placeholder();
1027
void shader_destroy_modules(RID p_shader);
1028
1029
uint64_t shader_get_vertex_input_attribute_mask(RID p_shader);
1030
1031
/******************/
1032
/**** UNIFORMS ****/
1033
/******************/
1034
String get_perf_report() const;
1035
1036
/*****************/
1037
/**** BUFFERS ****/
1038
/*****************/
1039
1040
RID uniform_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data = {}, BitField<BufferCreationBits> p_creation_bits = 0);
1041
// Vector-based convenience overload; forwards to the Span-based
// uniform_buffer_create() above.
RID _uniform_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<BufferCreationBits> p_creation_bits = 0) {
	return uniform_buffer_create(p_size_bytes, p_data, p_creation_bits);
}
1044
1045
RID storage_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data = {}, BitField<StorageBufferUsage> p_usage = 0, BitField<BufferCreationBits> p_creation_bits = 0);
1046
// Vector-based convenience overload; forwards to the Span-based
// storage_buffer_create() above.
RID _storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage = 0, BitField<BufferCreationBits> p_creation_bits = 0) {
	return storage_buffer_create(p_size_bytes, p_data, p_usage, p_creation_bits);
}
1049
1050
RID texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, Span<uint8_t> p_data = {});
1051
// Vector-based convenience overload; forwards to the Span-based
// texture_buffer_create() above.
RID _texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
	return texture_buffer_create(p_size_elements, p_format, p_data);
}
1054
1055
// One uniform binding (possibly an array of resources) as supplied to
// uniform_set_create(). Stores a single RID inline and only allocates a
// Vector when more than one resource is attached.
struct Uniform {
	UniformType uniform_type = UNIFORM_TYPE_IMAGE;
	uint32_t binding = 0; // Binding index as specified in shader.
	// This flag specifies that this is an immutable sampler to be set when creating pipeline layout.
	bool immutable_sampler = false;

private:
	// In most cases only one ID is provided per binding, so avoid allocating memory unnecessarily for performance.
	// Invariant: when `id` is valid, `ids` is unused; once a second ID is
	// appended, both move into `ids` and `id` becomes invalid.
	RID id; // If only one is provided, this is used.
	Vector<RID> ids; // If multiple ones are provided, this is used instead.

public:
	// Number of attached resource IDs, regardless of representation.
	_FORCE_INLINE_ uint32_t get_id_count() const {
		return (id.is_valid() ? 1 : ids.size());
	}

	_FORCE_INLINE_ RID get_id(uint32_t p_idx) const {
		if (id.is_valid()) {
			// Single-ID representation: only index 0 is valid.
			ERR_FAIL_COND_V(p_idx != 0, RID());
			return id;
		} else {
			return ids[p_idx];
		}
	}
	_FORCE_INLINE_ void set_id(uint32_t p_idx, RID p_id) {
		if (id.is_valid()) {
			// Single-ID representation: only index 0 may be overwritten.
			ERR_FAIL_COND(p_idx != 0);
			id = p_id;
		} else {
			ids.write[p_idx] = p_id;
		}
	}

	// Appends an ID; migrates from the inline single-ID form to the Vector
	// form the moment a second ID is added.
	_FORCE_INLINE_ void append_id(RID p_id) {
		if (ids.is_empty()) {
			if (id == RID()) {
				id = p_id;
			} else {
				ids.push_back(id);
				ids.push_back(p_id);
				id = RID();
			}
		} else {
			ids.push_back(p_id);
		}
	}

	// Resets to the empty state (both representations cleared).
	_FORCE_INLINE_ void clear_ids() {
		id = RID();
		ids.clear();
	}

	_FORCE_INLINE_ Uniform(UniformType p_type, int p_binding, RID p_id) {
		uniform_type = p_type;
		binding = p_binding;
		id = p_id;
	}
	_FORCE_INLINE_ Uniform(UniformType p_type, int p_binding, const Vector<RID> &p_ids) {
		uniform_type = p_type;
		binding = p_binding;
		ids = p_ids;
	}
	_FORCE_INLINE_ Uniform() = default;
};
1119
1120
typedef Uniform PipelineImmutableSampler;
1121
RID shader_create_from_bytecode_with_samplers(const Vector<uint8_t> &p_shader_binary, RID p_placeholder = RID(), const Vector<PipelineImmutableSampler> &p_immutable_samplers = Vector<PipelineImmutableSampler>());
1122
1123
private:
1124
static const uint32_t MAX_UNIFORM_SETS = 16;
1125
static const uint32_t MAX_PUSH_CONSTANT_SIZE = 128;
1126
1127
// This structure contains the descriptor set. They _need_ to be allocated
1128
// for a shader (and will be erased when this shader is erased), but should
1129
// work for other shaders as long as the hash matches. This covers using
1130
// them in shader variants.
1131
//
1132
// Keep also in mind that you can share buffers between descriptor sets, so
1133
// the above restriction is not too serious.
1134
1135
// A baked descriptor set: driver handle plus the bookkeeping needed for
// validation, dependency tracking and invalidation callbacks.
struct UniformSet {
	uint32_t format = 0; // Cached uniform-set format ID.
	RID shader_id; // Shader this set was baked against.
	uint32_t shader_set = 0; // Set index within that shader.
	RDD::UniformSetID driver_id;
	struct AttachableTexture {
		uint32_t bind = 0;
		RID texture;
	};

	struct SharedTexture {
		uint32_t writing = 0;
		RID texture;
	};

	LocalVector<AttachableTexture> attachable_textures; // Used for validation.
	Vector<RDG::ResourceTracker *> draw_trackers;
	Vector<RDG::ResourceUsage> draw_trackers_usage;
	HashMap<RID, RDG::ResourceUsage> untracked_usage; // Usage for resources with no tracker.
	LocalVector<SharedTexture> shared_textures_to_update;
	LocalVector<RID> pending_clear_textures;
	InvalidationCallback invalidated_callback = nullptr; // Called when the set is invalidated.
	void *invalidated_callback_userdata = nullptr;
};
1159
1160
RID_Owner<UniformSet, true> uniform_set_owner;
1161
1162
void _uniform_set_update_shared(UniformSet *p_uniform_set);
1163
void _uniform_set_update_clears(UniformSet *p_uniform_set);
1164
1165
public:
1166
/** Bake a set of uniforms that can be bound at runtime with the given shader.
1167
* @remark Setting p_linear_pool = true while keeping the RID around for longer than the current frame will result in undefined behavior.
1168
* @param p_uniforms The uniforms to bake into a set.
1169
* @param p_shader The shader you intend to bind these uniforms with.
1170
* @param p_set_index The set. Should be in range [0; 4)
1171
* The value 4 comes from physical_device_properties.limits.maxBoundDescriptorSets. Vulkan only guarantees maxBoundDescriptorSets >= 4 (== 4 is very common on Mobile).
1172
* @param p_linear_pool If you call this function every frame (and free the returned RID within the same frame!), set it to true for better performance.
1173
* If you plan on keeping the return value around for more than one frame (e.g. Sets that are created once and reused forever) you MUST set it to false.
1174
* @return Baked descriptor set.
1175
*/
1176
RID uniform_set_create(const VectorView<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set, bool p_linear_pool = false);
1177
bool uniform_set_is_valid(RID p_uniform_set);
1178
void uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata);
1179
1180
bool uniform_sets_have_linear_pools() const;
1181
1182
/*******************/
1183
/**** PIPELINES ****/
1184
/*******************/
1185
1186
// Render pipeline contains ALL the
1187
// information required for drawing.
1188
// This includes all the rasterizer state
1189
// as well as shader used, framebuffer format,
1190
// etc.
1191
// While the pipeline is just a single object
1192
// (VkPipeline) a lot of values are also saved
1193
// here to do validation (vulkan does none by
1194
// default) and warn the user if something
1195
// was not supplied as intended.
1196
private:
1197
// Internal render pipeline object: driver handle plus cached shader data,
// and (in debug builds) the values needed to validate draw calls against
// the pipeline's creation parameters.
struct RenderPipeline {
	// Cached values for validation.
#ifdef DEBUG_ENABLED
	struct Validation {
		FramebufferFormatID framebuffer_format;
		uint32_t render_pass = 0;
		uint32_t dynamic_state = 0;
		VertexFormatID vertex_format;
		bool uses_restart_indices = false;
		uint32_t primitive_minimum = 0;
		uint32_t primitive_divisor = 0;
	} validation;
#endif
	// Actual pipeline.
	RID shader;
	RDD::ShaderID shader_driver_id;
	uint32_t shader_layout_hash = 0;
	Vector<uint32_t> set_formats;
	RDD::PipelineID driver_id;
	BitField<RDD::PipelineStageBits> stage_bits = {};
	uint32_t push_constant_size = 0;
};
1219
1220
RID_Owner<RenderPipeline, true> render_pipeline_owner;
1221
1222
bool pipeline_cache_enabled = false;
1223
size_t pipeline_cache_size = 0;
1224
String pipeline_cache_file_path;
1225
WorkerThreadPool::TaskID pipeline_cache_save_task = WorkerThreadPool::INVALID_TASK_ID;
1226
1227
Vector<uint8_t> _load_pipeline_cache();
1228
static void _save_pipeline_cache(void *p_data);
1229
1230
// Internal compute pipeline object: driver handle plus the shader data
// cached for binding validation and dispatch.
struct ComputePipeline {
	RID shader;
	RDD::ShaderID shader_driver_id;
	uint32_t shader_layout_hash = 0;
	Vector<uint32_t> set_formats;
	RDD::PipelineID driver_id;
	uint32_t push_constant_size = 0;
	uint32_t local_group_size[3] = { 0, 0, 0 }; // x/y/z workgroup size.
};
1239
1240
RID_Owner<ComputePipeline, true> compute_pipeline_owner;
1241
1242
// Internal raytracing pipeline object, mirroring ComputePipeline but with
// a raytracing-specific driver handle.
struct RaytracingPipeline {
	RID shader;
	RDD::ShaderID shader_driver_id;
	uint32_t shader_layout_hash = 0;
	Vector<uint32_t> set_formats;
	RDD::RaytracingPipelineID driver_id;
	uint32_t push_constant_size = 0;
};
1250
1251
RID_Owner<RaytracingPipeline> raytracing_pipeline_owner;
1252
1253
public:
1254
RID render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags = 0, uint32_t p_for_render_pass = 0, const Vector<PipelineSpecializationConstant> &p_specialization_constants = Vector<PipelineSpecializationConstant>());
1255
bool render_pipeline_is_valid(RID p_pipeline);
1256
1257
RID compute_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants = Vector<PipelineSpecializationConstant>());
1258
bool compute_pipeline_is_valid(RID p_pipeline);
1259
1260
RID raytracing_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants = Vector<PipelineSpecializationConstant>());
1261
bool raytracing_pipeline_is_valid(RID p_pipeline);
1262
1263
void update_pipeline_cache(bool p_closing = false);
1264
1265
private:
1266
/****************/
1267
/**** SCREEN ****/
1268
/****************/
1269
HashMap<DisplayServer::WindowID, RDD::SwapChainID> screen_swap_chains;
1270
HashMap<DisplayServer::WindowID, RDD::FramebufferID> screen_framebuffers;
1271
1272
uint32_t _get_swap_chain_desired_count() const;
1273
1274
public:
1275
Error screen_create(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID);
1276
Error screen_prepare_for_drawing(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID);
1277
int screen_get_width(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1278
int screen_get_height(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1279
int screen_get_pre_rotation_degrees(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1280
FramebufferFormatID screen_get_framebuffer_format(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1281
Error screen_free(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID);
1282
1283
private:
1284
/********************************/
1285
/**** ACCELERATION STRUCTURE ****/
1286
/********************************/
1287
1288
struct InstancesBuffer {
1289
Buffer buffer;
1290
uint32_t instance_count;
1291
Vector<RID> blases;
1292
};
1293
1294
// Internal acceleration structure object (BLAS or TLAS) with the RIDs of
// the resources it was built from, kept to retain references.
struct AccelerationStructure {
	RDD::AccelerationStructureID driver_id;
	RDD::AccelerationStructureType type = RDD::ACCELERATION_STRUCTURE_TYPE_BLAS;
	RDG::ResourceTracker *draw_tracker = nullptr;
	Vector<RDG::ResourceTracker *> draw_trackers;

	RID scratch_buffer; // NOTE(review): presumably the build-time scratch memory — confirm in the .cpp.
	RID vertex_array; // BLAS geometry source (see blas_create()).
	RID index_array;
	RID transform_buffer;
	RID instances_buffer; // TLAS instance source (see tlas_create()).
};
1306
1307
RID_Owner<InstancesBuffer, true> instances_buffer_owner;
1308
RID_Owner<AccelerationStructure> acceleration_structure_owner;
1309
1310
public:
1311
// Geometry flags accepted by blas_create().
enum AccelerationStructureGeometryBits {
	ACCELERATION_STRUCTURE_GEOMETRY_OPAQUE = (1 << 0),
	ACCELERATION_STRUCTURE_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION = (1 << 1),
};
1315
1316
RID blas_create(RID p_vertex_array, RID p_index_array, BitField<AccelerationStructureGeometryBits> p_geometry_bits = 0, uint32_t p_position_attribute_location = 0);
1317
RID tlas_instances_buffer_create(uint32_t p_instance_count, BitField<BufferCreationBits> p_creation_bits = 0);
1318
void tlas_instances_buffer_fill(RID p_buffer, const Vector<RID> &p_blases, VectorView<Transform3D> p_transforms);
1319
RID tlas_create(RID p_instances_buffer);
1320
Error acceleration_structure_build(RID p_acceleration_structure);
1321
1322
/*************************/
1323
/**** DRAW LISTS (II) ****/
1324
/*************************/
1325
1326
private:
1327
// Draw list contains both the command buffer
1328
// used for drawing as well as a LOT of
1329
// information used for validation. This
1330
// validation is cheap so most of it can
1331
// also run in release builds.
1332
1333
// State of the draw list being recorded: current bindings plus validation
// data. Debug builds keep the full validation struct; release builds keep
// only the fields needed for the cheap checks.
struct DrawList {
	Rect2i viewport;
	bool active = false; // True while the list is open for recording.

	// Per-descriptor-set binding state.
	struct SetState {
		uint32_t pipeline_expected_format = 0; // Format the bound pipeline expects at this slot.
		uint32_t uniform_set_format = 0; // Format of the uniform set actually bound.
		RDD::UniformSetID uniform_set_driver_id;
		RID uniform_set;
		bool bound = false;
	};

	struct State {
		SetState sets[MAX_UNIFORM_SETS];
		uint32_t set_count = 0;
		RID pipeline;
		RID pipeline_shader;
		RDD::ShaderID pipeline_shader_driver_id;
		uint32_t pipeline_shader_layout_hash = 0;
		uint32_t pipeline_push_constant_size = 0;
		RID vertex_array;
		RID index_array;
		uint32_t draw_count = 0;
	} state;

#ifdef DEBUG_ENABLED
	struct Validation {
		// Actual render pass values.
		uint32_t dynamic_state = 0;
		VertexFormatID vertex_format = INVALID_ID;
		uint32_t vertex_array_size = 0;
		uint32_t vertex_max_instances_allowed = 0xFFFFFFFF;
		bool index_buffer_uses_restart_indices = false;
		uint32_t index_array_count = 0;
		uint32_t index_array_max_index = 0;
		Vector<uint32_t> set_formats;
		Vector<bool> set_bound;
		Vector<RID> set_rids;
		// Last pipeline set values.
		bool pipeline_active = false;
		uint32_t pipeline_dynamic_state = 0;
		VertexFormatID pipeline_vertex_format = INVALID_ID;
		RID pipeline_shader;
		bool pipeline_uses_restart_indices = false;
		uint32_t pipeline_primitive_divisor = 0;
		uint32_t pipeline_primitive_minimum = 0;
		uint32_t pipeline_push_constant_size = 0;
		bool pipeline_push_constant_supplied = false;
	} validation;
#else
	// Reduced validation data kept in release builds.
	struct Validation {
		uint32_t vertex_array_size = 0;
		uint32_t index_array_count = 0;
	} validation;
#endif
};
1389
1390
DrawList draw_list;
1391
uint32_t draw_list_subpass_count = 0;
1392
#ifdef DEBUG_ENABLED
1393
FramebufferFormatID draw_list_framebuffer_format = INVALID_ID;
1394
#endif
1395
uint32_t draw_list_current_subpass = 0;
1396
1397
LocalVector<RID> draw_list_bound_textures;
1398
1399
void _draw_list_start(const Rect2i &p_viewport);
1400
void _draw_list_end(Rect2i *r_last_viewport = nullptr);
1401
1402
public:
1403
// Per-attachment load behavior for draw_list_begin(): bits 0-7 clear the
// corresponding color attachment, bits 8-15 ignore (discard) it, with
// separate depth/stencil bits. Default (0) loads existing contents.
enum DrawFlags {
	DRAW_DEFAULT_ALL = 0,
	DRAW_CLEAR_COLOR_0 = (1 << 0),
	DRAW_CLEAR_COLOR_1 = (1 << 1),
	DRAW_CLEAR_COLOR_2 = (1 << 2),
	DRAW_CLEAR_COLOR_3 = (1 << 3),
	DRAW_CLEAR_COLOR_4 = (1 << 4),
	DRAW_CLEAR_COLOR_5 = (1 << 5),
	DRAW_CLEAR_COLOR_6 = (1 << 6),
	DRAW_CLEAR_COLOR_7 = (1 << 7),
	DRAW_CLEAR_COLOR_MASK = 0xFF,
	DRAW_CLEAR_COLOR_ALL = DRAW_CLEAR_COLOR_MASK,
	DRAW_IGNORE_COLOR_0 = (1 << 8),
	DRAW_IGNORE_COLOR_1 = (1 << 9),
	DRAW_IGNORE_COLOR_2 = (1 << 10),
	DRAW_IGNORE_COLOR_3 = (1 << 11),
	DRAW_IGNORE_COLOR_4 = (1 << 12),
	DRAW_IGNORE_COLOR_5 = (1 << 13),
	DRAW_IGNORE_COLOR_6 = (1 << 14),
	DRAW_IGNORE_COLOR_7 = (1 << 15),
	DRAW_IGNORE_COLOR_MASK = 0xFF00,
	DRAW_IGNORE_COLOR_ALL = DRAW_IGNORE_COLOR_MASK,
	DRAW_CLEAR_DEPTH = (1 << 16),
	DRAW_IGNORE_DEPTH = (1 << 17),
	DRAW_CLEAR_STENCIL = (1 << 18),
	DRAW_IGNORE_STENCIL = (1 << 19),
	DRAW_CLEAR_ALL = DRAW_CLEAR_COLOR_ALL | DRAW_CLEAR_DEPTH | DRAW_CLEAR_STENCIL,
	DRAW_IGNORE_ALL = DRAW_IGNORE_COLOR_ALL | DRAW_IGNORE_DEPTH | DRAW_IGNORE_STENCIL
};
1432
1433
/**
1434
* @param p_clear_color Must use linear encoding when HDR 2D is active.
1435
*/
1436
DrawListID draw_list_begin_for_screen(DisplayServer::WindowID p_screen = 0, const Color &p_clear_color = Color());
1437
/**
1438
* @param p_clear_color_values Color values must use linear encoding when HDR 2D is active.
1439
*/
1440
DrawListID draw_list_begin(RID p_framebuffer, BitField<DrawFlags> p_draw_flags = DRAW_DEFAULT_ALL, VectorView<Color> p_clear_color_values = VectorView<Color>(), float p_clear_depth_value = 1.0f, uint32_t p_clear_stencil_value = 0, const Rect2 &p_region = Rect2(), uint32_t p_breadcrumb = 0);
1441
DrawListID _draw_list_begin_bind(RID p_framebuffer, BitField<DrawFlags> p_draw_flags = DRAW_DEFAULT_ALL, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth_value = 1.0f, uint32_t p_clear_stencil_value = 0, const Rect2 &p_region = Rect2(), uint32_t p_breadcrumb = 0);
1442
1443
void draw_list_set_blend_constants(DrawListID p_list, const Color &p_color);
1444
void draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline);
1445
void draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index);
1446
void draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array);
1447
void draw_list_bind_vertex_buffers_format(DrawListID p_list, VertexFormatID p_vertex_format, uint32_t p_vertex_count, const Span<RID> &p_vertex_buffers, const Span<uint64_t> &p_offsets = Vector<uint64_t>());
1448
void draw_list_bind_index_array(DrawListID p_list, RID p_index_array);
1449
void draw_list_set_line_width(DrawListID p_list, float p_width);
1450
void draw_list_set_push_constant(DrawListID p_list, const void *p_data, uint32_t p_data_size);
1451
1452
void draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances = 1, uint32_t p_procedural_vertices = 0);
1453
void draw_list_draw_indirect(DrawListID p_list, bool p_use_indices, RID p_buffer, uint32_t p_offset = 0, uint32_t p_draw_count = 1, uint32_t p_stride = 0);
1454
1455
void draw_list_set_viewport(DrawListID p_list, const Rect2 &p_rect);
1456
void draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect);
1457
void draw_list_disable_scissor(DrawListID p_list);
1458
1459
uint32_t draw_list_get_current_pass();
1460
DrawListID draw_list_switch_to_next_pass();
1461
1462
void draw_list_end();
1463
1464
private:
1465
/**************************/
1466
/**** RAYTRACING LISTS ****/
1467
/**************************/
1468
1469
// State of the raytracing list being recorded: bound pipeline, descriptor
// sets and push constants, plus debug-only validation data.
struct RaytracingList {
	bool active = false; // True while the list is open for recording.
	// Per-descriptor-set binding state.
	struct SetState {
		uint32_t pipeline_expected_format = 0;
		uint32_t uniform_set_format = 0;
		RDD::UniformSetID uniform_set_driver_id;
		RID uniform_set;
		bool bound = false;
	};

	struct State {
		SetState sets[MAX_UNIFORM_SETS];
		uint32_t set_count = 0;
		RID pipeline;
		RDD::RaytracingPipelineID pipeline_driver_id;
		RID pipeline_shader;
		RDD::ShaderID pipeline_shader_driver_id;
		uint32_t pipeline_shader_layout_hash = 0;
		uint8_t push_constant_data[MAX_PUSH_CONSTANT_SIZE] = {}; // Raw push constant bytes set so far.
		uint32_t push_constant_size = 0;
		uint32_t trace_count = 0;
	} state;

#ifdef DEBUG_ENABLED
	struct Validation {
		bool active = true; // Means command buffer was not closed, so you can keep adding things.
		Vector<uint32_t> set_formats;
		Vector<bool> set_bound;
		Vector<RID> set_rids;
		// Last pipeline set values.
		bool pipeline_active = false;
		RID pipeline_shader;
		uint32_t invalid_set_from = 0;
		uint32_t pipeline_push_constant_size = 0;
		bool pipeline_push_constant_supplied = false;
	} validation;
#endif
};
1507
1508
RaytracingList raytracing_list;
1509
RaytracingList::State raytracing_list_barrier_state;
1510
1511
public:
1512
RaytracingListID raytracing_list_begin();
1513
void raytracing_list_bind_raytracing_pipeline(RaytracingListID p_list, RID p_raytracing_pipeline);
1514
void raytracing_list_bind_uniform_set(RaytracingListID p_list, RID p_uniform_set, uint32_t p_index);
1515
void raytracing_list_set_push_constant(RaytracingListID p_list, const void *p_data, uint32_t p_data_size);
1516
void raytracing_list_trace_rays(RaytracingListID p_list, uint32_t p_width, uint32_t p_height);
1517
void raytracing_list_end();
1518
1519
private:
1520
/***********************/
1521
/**** COMPUTE LISTS ****/
1522
/***********************/
1523
1524
struct ComputeList {
1525
bool active = false;
1526
struct SetState {
1527
uint32_t pipeline_expected_format = 0;
1528
uint32_t uniform_set_format = 0;
1529
RDD::UniformSetID uniform_set_driver_id;
1530
RID uniform_set;
1531
bool bound = false;
1532
};
1533
1534
struct State {
1535
SetState sets[MAX_UNIFORM_SETS];
1536
uint32_t set_count = 0;
1537
RID pipeline;
1538
RID pipeline_shader;
1539
RDD::ShaderID pipeline_shader_driver_id;
1540
uint32_t pipeline_shader_layout_hash = 0;
1541
uint32_t local_group_size[3] = { 0, 0, 0 };
1542
uint8_t push_constant_data[MAX_PUSH_CONSTANT_SIZE] = {};
1543
uint32_t push_constant_size = 0;
1544
uint32_t dispatch_count = 0;
1545
} state;
1546
1547
#ifdef DEBUG_ENABLED
1548
struct Validation {
1549
Vector<uint32_t> set_formats;
1550
Vector<bool> set_bound;
1551
Vector<RID> set_rids;
1552
// Last pipeline set values.
1553
bool pipeline_active = false;
1554
RID pipeline_shader;
1555
uint32_t invalid_set_from = 0;
1556
uint32_t pipeline_push_constant_size = 0;
1557
bool pipeline_push_constant_supplied = false;
1558
} validation;
1559
#endif
1560
};
1561
1562
ComputeList compute_list;
1563
ComputeList::State compute_list_barrier_state;
1564
1565
public:
1566
ComputeListID compute_list_begin();
1567
void compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline);
1568
void compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index);
1569
void compute_list_set_push_constant(ComputeListID p_list, const void *p_data, uint32_t p_data_size);
1570
void compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups);
1571
void compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads);
1572
void compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset);
1573
void compute_list_add_barrier(ComputeListID p_list);
1574
1575
void compute_list_end();
1576
1577
private:
1578
/*************************/
1579
/**** TRANSFER WORKER ****/
1580
/*************************/
1581
1582
struct TransferWorker {
1583
uint32_t index = 0;
1584
RDD::BufferID staging_buffer;
1585
uint32_t max_transfer_size = 0;
1586
uint32_t staging_buffer_size_in_use = 0;
1587
uint32_t staging_buffer_size_allocated = 0;
1588
RDD::CommandBufferID command_buffer;
1589
RDD::CommandPoolID command_pool;
1590
RDD::FenceID command_fence;
1591
LocalVector<RDD::TextureBarrier> texture_barriers;
1592
bool recording = false;
1593
bool submitted = false;
1594
BinaryMutex thread_mutex;
1595
uint64_t operations_processed = 0;
1596
uint64_t operations_submitted = 0;
1597
uint64_t operations_counter = 0;
1598
BinaryMutex operations_mutex;
1599
};
1600
1601
TightLocalVector<TransferWorker *> transfer_worker_pool;
1602
uint32_t transfer_worker_pool_size = 0;
1603
uint32_t transfer_worker_pool_max_size = 1;
1604
TightLocalVector<uint64_t> transfer_worker_operation_used_by_draw;
1605
LocalVector<uint32_t> transfer_worker_pool_available_list;
1606
LocalVector<RDD::TextureBarrier> transfer_worker_pool_texture_barriers;
1607
BinaryMutex transfer_worker_pool_mutex;
1608
BinaryMutex transfer_worker_pool_texture_barriers_mutex;
1609
ConditionVariable transfer_worker_pool_condition;
1610
1611
TransferWorker *_acquire_transfer_worker(uint32_t p_transfer_size, uint32_t p_required_align, uint32_t &r_staging_offset);
1612
void _release_transfer_worker(TransferWorker *p_transfer_worker);
1613
void _end_transfer_worker(TransferWorker *p_transfer_worker);
1614
void _submit_transfer_worker(TransferWorker *p_transfer_worker, VectorView<RDD::SemaphoreID> p_signal_semaphores = VectorView<RDD::SemaphoreID>());
1615
void _wait_for_transfer_worker(TransferWorker *p_transfer_worker);
1616
void _flush_barriers_for_transfer_worker(TransferWorker *p_transfer_worker);
1617
void _check_transfer_worker_operation(uint32_t p_transfer_worker_index, uint64_t p_transfer_worker_operation);
1618
void _check_transfer_worker_buffer(Buffer *p_buffer);
1619
void _check_transfer_worker_texture(Texture *p_texture);
1620
void _check_transfer_worker_vertex_array(VertexArray *p_vertex_array);
1621
void _check_transfer_worker_index_array(IndexArray *p_index_array);
1622
void _submit_transfer_workers(RDD::CommandBufferID p_draw_command_buffer = RDD::CommandBufferID());
1623
void _submit_transfer_barriers(RDD::CommandBufferID p_draw_command_buffer);
1624
void _wait_for_transfer_workers();
1625
void _free_transfer_workers();
1626
1627
/***********************/
1628
/**** COMMAND GRAPH ****/
1629
/***********************/
1630
1631
bool _texture_make_mutable(Texture *p_texture, RID p_texture_id);
1632
bool _buffer_make_mutable(Buffer *p_buffer, RID p_buffer_id);
1633
bool _vertex_array_make_mutable(VertexArray *p_vertex_array, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker);
1634
bool _index_array_make_mutable(IndexArray *p_index_array, RDG::ResourceTracker *p_resource_tracker);
1635
bool _uniform_set_make_mutable(UniformSet *p_uniform_set, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker);
1636
bool _dependency_make_mutable(RID p_id, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker);
1637
bool _dependencies_make_mutable_recursive(RID p_id, RDG::ResourceTracker *p_resource_tracker);
1638
bool _dependencies_make_mutable(RID p_id, RDG::ResourceTracker *p_resource_tracker);
1639
1640
RenderingDeviceGraph draw_graph;
1641
1642
/**************************/
1643
/**** QUEUE MANAGEMENT ****/
1644
/**************************/
1645
1646
RDD::CommandQueueFamilyID main_queue_family;
1647
RDD::CommandQueueFamilyID transfer_queue_family;
1648
RDD::CommandQueueFamilyID present_queue_family;
1649
RDD::CommandQueueID main_queue;
1650
RDD::CommandQueueID transfer_queue;
1651
RDD::CommandQueueID present_queue;
1652
1653
/**************************/
1654
/**** FRAME MANAGEMENT ****/
1655
/**************************/
1656
1657
// This is the frame structure. There are normally
1658
// 3 of these (used for triple buffering), or 2
1659
// (double buffering). They are cycled constantly.
1660
//
1661
// It contains two command buffers, one that is
1662
// used internally for setting up (creating stuff)
1663
// and another used mostly for drawing.
1664
//
1665
// They also contains a list of things that need
1666
// to be disposed of when deleted, which can't
1667
// happen immediately due to the asynchronous
1668
// nature of the GPU. They will get deleted
1669
// when the frame is cycled.
1670
1671
struct Frame {
1672
// List in usage order, from last to free to first to free.
1673
List<Buffer> buffers_to_dispose_of;
1674
List<Texture> textures_to_dispose_of;
1675
List<Framebuffer> framebuffers_to_dispose_of;
1676
List<RDD::SamplerID> samplers_to_dispose_of;
1677
List<Shader> shaders_to_dispose_of;
1678
List<UniformSet> uniform_sets_to_dispose_of;
1679
List<RenderPipeline> render_pipelines_to_dispose_of;
1680
List<ComputePipeline> compute_pipelines_to_dispose_of;
1681
List<AccelerationStructure> acceleration_structures_to_dispose_of;
1682
List<RaytracingPipeline> raytracing_pipelines_to_dispose_of;
1683
1684
// Pending asynchronous data transfer for buffers.
1685
LocalVector<RDD::BufferID> download_buffer_staging_buffers;
1686
LocalVector<RDD::BufferCopyRegion> download_buffer_copy_regions;
1687
LocalVector<BufferGetDataRequest> download_buffer_get_data_requests;
1688
1689
// Pending asynchronous data transfer for textures.
1690
LocalVector<RDD::BufferID> download_texture_staging_buffers;
1691
LocalVector<RDD::BufferTextureCopyRegion> download_buffer_texture_copy_regions;
1692
LocalVector<uint32_t> download_texture_mipmap_offsets;
1693
LocalVector<TextureGetDataRequest> download_texture_get_data_requests;
1694
1695
// The command pool used by the command buffer.
1696
RDD::CommandPoolID command_pool;
1697
1698
// The command buffer used by the main thread when recording the frame.
1699
RDD::CommandBufferID command_buffer;
1700
1701
// Signaled by the command buffer submission. Present must wait on this semaphore.
1702
RDD::SemaphoreID semaphore;
1703
1704
// Signaled by the command buffer submission. Must wait on this fence before beginning command recording for the frame.
1705
RDD::FenceID fence;
1706
bool fence_signaled = false;
1707
1708
// Semaphores the frame must wait on before executing the command buffer.
1709
LocalVector<RDD::SemaphoreID> semaphores_to_wait_on;
1710
1711
// Swap chains prepared for drawing during the frame that must be presented.
1712
LocalVector<RDD::SwapChainID> swap_chains_to_present;
1713
1714
// Semaphores the transfer workers can use to wait before rendering the frame.
1715
// This must have the same size of the transfer worker pool.
1716
TightLocalVector<RDD::SemaphoreID> transfer_worker_semaphores;
1717
1718
// Extra command buffer pool used for driver workarounds or to reduce GPU bubbles by
1719
// splitting the final render pass to the swapchain into its own cmd buffer.
1720
RDG::CommandBufferPool command_buffer_pool;
1721
1722
struct Timestamp {
1723
String description;
1724
uint64_t value = 0;
1725
};
1726
1727
RDD::QueryPoolID timestamp_pool;
1728
1729
TightLocalVector<String> timestamp_names;
1730
TightLocalVector<uint64_t> timestamp_cpu_values;
1731
uint32_t timestamp_count = 0;
1732
TightLocalVector<String> timestamp_result_names;
1733
TightLocalVector<uint64_t> timestamp_cpu_result_values;
1734
TightLocalVector<uint64_t> timestamp_result_values;
1735
uint32_t timestamp_result_count = 0;
1736
uint64_t index = 0;
1737
};
1738
1739
uint32_t max_timestamp_query_elements = 0;
1740
1741
int frame = 0;
1742
TightLocalVector<Frame> frames;
1743
uint64_t frames_drawn = 0;
1744
1745
// Whenever logic/physics request a graphics operation (not just deleting a resource) that requires
1746
// us to flush all graphics commands, we must set frames_pending_resources_for_processing = frames.size().
1747
// This is important for when the user requested for the logic loop to still be updated while
1748
// graphics should not (e.g. headless Multiplayer servers, minimized windows that need to still
1749
// process something on the background).
1750
uint32_t frames_pending_resources_for_processing = 0u;
1751
1752
public:
1753
bool has_pending_resources_for_processing() const { return frames_pending_resources_for_processing != 0u; }
1754
1755
private:
1756
void _free_pending_resources(int p_frame);
1757
1758
uint64_t texture_memory = 0;
1759
uint64_t buffer_memory = 0;
1760
1761
protected:
1762
void execute_chained_cmds(bool p_present_swap_chain,
1763
RenderingDeviceDriver::FenceID p_draw_fence,
1764
RenderingDeviceDriver::SemaphoreID p_dst_draw_semaphore_to_signal);
1765
1766
public:
1767
void _free_internal(RID p_id);
1768
void _begin_frame(bool p_presented = false);
1769
void _end_frame();
1770
void _execute_frame(bool p_present);
1771
void _stall_for_frame(uint32_t p_frame);
1772
void _stall_for_previous_frames();
1773
void _flush_and_stall_for_all_frames(bool p_begin_frame = true);
1774
1775
template <typename T>
1776
void _free_rids(T &p_owner, const char *p_type);
1777
1778
#ifdef DEV_ENABLED
1779
HashMap<RID, String> resource_names;
1780
#endif
1781
1782
public:
1783
Error initialize(RenderingContextDriver *p_context, DisplayServer::WindowID p_main_window = DisplayServer::INVALID_WINDOW_ID);
1784
void finalize();
1785
1786
void _set_max_fps(int p_max_fps);
1787
1788
void free_rid(RID p_rid);
1789
#ifndef DISABLE_DEPRECATED
1790
[[deprecated("Use `free_rid()` instead.")]] void free(RID p_rid) {
1791
free_rid(p_rid);
1792
}
1793
#endif // DISABLE_DEPRECATED
1794
1795
/****************/
1796
/**** Timing ****/
1797
/****************/
1798
1799
void capture_timestamp(const String &p_name);
1800
uint32_t get_captured_timestamps_count() const;
1801
uint64_t get_captured_timestamps_frame() const;
1802
uint64_t get_captured_timestamp_gpu_time(uint32_t p_index) const;
1803
uint64_t get_captured_timestamp_cpu_time(uint32_t p_index) const;
1804
String get_captured_timestamp_name(uint32_t p_index) const;
1805
1806
/****************/
1807
/**** LIMITS ****/
1808
/****************/
1809
1810
uint64_t limit_get(Limit p_limit) const;
1811
1812
void swap_buffers(bool p_present);
1813
1814
uint32_t get_frame_delay() const;
1815
1816
void submit();
1817
void sync();
1818
1819
enum MemoryType {
1820
MEMORY_TEXTURES,
1821
MEMORY_BUFFERS,
1822
MEMORY_TOTAL
1823
};
1824
1825
uint64_t get_memory_usage(MemoryType p_type) const;
1826
1827
RenderingDevice *create_local_device();
1828
1829
void set_resource_name(RID p_id, const String &p_name);
1830
1831
void _draw_command_begin_label(String p_label_name, const Color &p_color = Color(1, 1, 1, 1));
1832
void draw_command_begin_label(const Span<char> p_label_name, const Color &p_color = Color(1, 1, 1, 1));
1833
void draw_command_end_label();
1834
1835
String get_device_vendor_name() const;
1836
String get_device_name() const;
1837
DeviceType get_device_type() const;
1838
String get_device_api_name() const;
1839
String get_device_api_version() const;
1840
String get_device_pipeline_cache_uuid() const;
1841
1842
uint64_t get_frames_drawn() const { return frames_drawn; }
1843
1844
bool is_composite_alpha_supported() const;
1845
1846
uint64_t get_driver_resource(DriverResource p_resource, RID p_rid = RID(), uint64_t p_index = 0);
1847
1848
String get_driver_and_device_memory_report() const;
1849
1850
String get_tracked_object_name(uint32_t p_type_index) const;
1851
uint64_t get_tracked_object_type_count() const;
1852
1853
uint64_t get_driver_total_memory() const;
1854
uint64_t get_driver_allocation_count() const;
1855
uint64_t get_driver_memory_by_object_type(uint32_t p_type) const;
1856
uint64_t get_driver_allocs_by_object_type(uint32_t p_type) const;
1857
1858
uint64_t get_device_total_memory() const;
1859
uint64_t get_device_allocation_count() const;
1860
uint64_t get_device_memory_by_object_type(uint32_t p_type) const;
1861
uint64_t get_device_allocs_by_object_type(uint32_t p_type) const;
1862
1863
static RenderingDevice *get_singleton();
1864
1865
void make_current();
1866
1867
RenderingDevice();
1868
~RenderingDevice();
1869
1870
private:
1871
/*****************/
1872
/**** BINDERS ****/
1873
/*****************/
1874
1875
RID _texture_create(const Ref<RDTextureFormat> &p_format, const Ref<RDTextureView> &p_view, const TypedArray<PackedByteArray> &p_data = Array());
1876
RID _texture_create_shared(const Ref<RDTextureView> &p_view, RID p_with_texture);
1877
RID _texture_create_shared_from_slice(const Ref<RDTextureView> &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D);
1878
Ref<RDTextureFormat> _texture_get_format(RID p_rd_texture);
1879
1880
FramebufferFormatID _framebuffer_format_create(const TypedArray<RDAttachmentFormat> &p_attachments, uint32_t p_view_count);
1881
FramebufferFormatID _framebuffer_format_create_multipass(const TypedArray<RDAttachmentFormat> &p_attachments, const TypedArray<RDFramebufferPass> &p_passes, uint32_t p_view_count);
1882
RID _framebuffer_create(const TypedArray<RID> &p_textures, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
1883
RID _framebuffer_create_multipass(const TypedArray<RID> &p_textures, const TypedArray<RDFramebufferPass> &p_passes, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
1884
1885
RID _sampler_create(const Ref<RDSamplerState> &p_state);
1886
1887
VertexFormatID _vertex_format_create(const TypedArray<RDVertexAttribute> &p_vertex_formats);
1888
RID _vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const TypedArray<RID> &p_src_buffers, const Vector<int64_t> &p_offsets = Vector<int64_t>());
1889
void _draw_list_bind_vertex_buffers_format(DrawListID p_list, VertexFormatID p_vertex_format, uint32_t p_vertex_count, const TypedArray<RID> &p_vertex_buffers, const Vector<int64_t> &p_offsets = Vector<int64_t>());
1890
1891
Ref<RDShaderSPIRV> _shader_compile_spirv_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache = true);
1892
Vector<uint8_t> _shader_compile_binary_from_spirv(const Ref<RDShaderSPIRV> &p_bytecode, const String &p_shader_name = "");
1893
RID _shader_create_from_spirv(const Ref<RDShaderSPIRV> &p_spirv, const String &p_shader_name = "");
1894
1895
RID _uniform_set_create(const TypedArray<RDUniform> &p_uniforms, RID p_shader, uint32_t p_shader_set);
1896
1897
Error _buffer_update_bind(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data);
1898
1899
void _tlas_instances_buffer_fill(RID p_buffer, const TypedArray<RID> &p_blases, const TypedArray<Transform3D> &p_transforms);
1900
1901
RID _render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const Ref<RDPipelineRasterizationState> &p_rasterization_state, const Ref<RDPipelineMultisampleState> &p_multisample_state, const Ref<RDPipelineDepthStencilState> &p_depth_stencil_state, const Ref<RDPipelineColorBlendState> &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
1902
RID _compute_pipeline_create(RID p_shader, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
1903
RID _raytracing_pipeline_create(RID p_shader, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
1904
1905
void _draw_list_set_push_constant(DrawListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size);
1906
void _compute_list_set_push_constant(ComputeListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size);
1907
void _raytracing_list_set_push_constant(RaytracingListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size);
1908
};
1909
1910
VARIANT_ENUM_CAST(RenderingDevice::DeviceType)
1911
VARIANT_ENUM_CAST(RenderingDevice::DriverResource)
1912
VARIANT_ENUM_CAST(RenderingDevice::ShaderStage)
1913
VARIANT_ENUM_CAST(RenderingDevice::ShaderLanguage)
1914
VARIANT_ENUM_CAST(RenderingDevice::CompareOperator)
1915
VARIANT_ENUM_CAST(RenderingDevice::DataFormat)
1916
VARIANT_ENUM_CAST(RenderingDevice::TextureType)
1917
VARIANT_ENUM_CAST(RenderingDevice::TextureSamples)
1918
VARIANT_BITFIELD_CAST(RenderingDevice::TextureUsageBits)
1919
VARIANT_ENUM_CAST(RenderingDevice::TextureSwizzle)
1920
VARIANT_ENUM_CAST(RenderingDevice::TextureSliceType)
1921
VARIANT_ENUM_CAST(RenderingDevice::SamplerFilter)
1922
VARIANT_ENUM_CAST(RenderingDevice::SamplerRepeatMode)
1923
VARIANT_ENUM_CAST(RenderingDevice::SamplerBorderColor)
1924
VARIANT_ENUM_CAST(RenderingDevice::VertexFrequency)
1925
VARIANT_ENUM_CAST(RenderingDevice::IndexBufferFormat)
1926
VARIANT_BITFIELD_CAST(RenderingDevice::StorageBufferUsage)
1927
VARIANT_BITFIELD_CAST(RenderingDevice::BufferCreationBits)
1928
VARIANT_BITFIELD_CAST(RenderingDevice::AccelerationStructureGeometryBits)
1929
VARIANT_ENUM_CAST(RenderingDevice::UniformType)
1930
VARIANT_ENUM_CAST(RenderingDevice::RenderPrimitive)
1931
VARIANT_ENUM_CAST(RenderingDevice::PolygonCullMode)
1932
VARIANT_ENUM_CAST(RenderingDevice::PolygonFrontFace)
1933
VARIANT_ENUM_CAST(RenderingDevice::StencilOperation)
1934
VARIANT_ENUM_CAST(RenderingDevice::LogicOperation)
1935
VARIANT_ENUM_CAST(RenderingDevice::BlendFactor)
1936
VARIANT_ENUM_CAST(RenderingDevice::BlendOperation)
1937
VARIANT_BITFIELD_CAST(RenderingDevice::PipelineDynamicStateFlags)
1938
VARIANT_ENUM_CAST(RenderingDevice::PipelineSpecializationConstantType)
1939
VARIANT_ENUM_CAST(RenderingDevice::Limit)
1940
VARIANT_ENUM_CAST(RenderingDevice::MemoryType)
1941
VARIANT_ENUM_CAST(RenderingDevice::Features)
1942
VARIANT_ENUM_CAST(RenderingDevice::BreadcrumbMarker)
1943
VARIANT_BITFIELD_CAST(RenderingDevice::DrawFlags);
1944
1945
#ifndef DISABLE_DEPRECATED
1946
VARIANT_BITFIELD_CAST(RenderingDevice::BarrierMask);
1947
VARIANT_ENUM_CAST(RenderingDevice::InitialAction)
1948
VARIANT_ENUM_CAST(RenderingDevice::FinalAction)
1949
#endif
1950
1951
typedef RenderingDevice RD;
1952
1953