Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/servers/rendering/rendering_shader_container.cpp
11351 views
1
/**************************************************************************/
2
/* rendering_shader_container.cpp */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#include "rendering_shader_container.h"
32
33
#include "core/io/compression.h"
34
35
#include "thirdparty/spirv-reflect/spirv_reflect.h"
36
37
// Rounds p_size up to the nearest multiple of p_alignment.
// Returns p_size unchanged when it is already a multiple of p_alignment.
static inline uint32_t aligned_to(uint32_t p_size, uint32_t p_alignment) {
	const uint32_t remainder = p_size % p_alignment;
	return (remainder == 0) ? p_size : p_size + (p_alignment - remainder);
}
44
45
// Eagerly allocates the SPIRV-Reflect module so it has stable storage for the
// lifetime of the stage; it is populated later by reflect_spirv().
RenderingShaderContainer::ReflectedShaderStage::ReflectedShaderStage() :
		_module(memnew(SpvReflectShaderModule)) {
}
48
49
RenderingShaderContainer::ReflectedShaderStage::~ReflectedShaderStage() {
	// Release SPIRV-Reflect's internal allocations first, then free the module itself.
	spvReflectDestroyShaderModule(_module);
	memdelete(_module);
}
53
54
// Read-only access to the parsed SPIRV-Reflect module for this stage.
const SpvReflectShaderModule &RenderingShaderContainer::ReflectedShaderStage::module() const {
	return *_module;
}
57
58
// The stage's SPIR-V code viewed as 32-bit words, reinterpreted from the raw byte buffer.
const Span<uint32_t> RenderingShaderContainer::ReflectedShaderStage::spirv() const {
	return _spirv_data.span().reinterpret<uint32_t>();
}
61
62
// Default hook for format subclasses: parse extra bytes that follow the container
// header. Returns the number of extra bytes consumed (added to the read offset
// by from_bytes()); the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_header_extra_data(const uint8_t *p_bytes) {
	return 0;
}
65
66
// Default hook: parse extra bytes following the reflection data block.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_extra_data(const uint8_t *p_bytes) {
	return 0;
}
69
70
// Default hook: parse extra bytes before a set's uniform binding records.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_binding_uniform_extra_data_start(const uint8_t *p_bytes) {
	return 0;
}
73
74
// Default hook: parse extra bytes after the uniform binding record at p_index.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_binding_uniform_extra_data(const uint8_t *p_bytes, uint32_t p_index) {
	return 0;
}
77
78
// Default hook: parse extra bytes before the specialization constant records.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_specialization_extra_data_start(const uint8_t *p_bytes) {
	return 0;
}
81
82
// Default hook: parse extra bytes after the specialization constant record at p_index.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_specialization_extra_data(const uint8_t *p_bytes, uint32_t p_index) {
	return 0;
}
85
86
// Default hook: parse extra bytes before the per-shader blocks.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_shader_extra_data_start(const uint8_t *p_bytes) {
	return 0;
}
89
90
// Default hook: parse extra bytes after the shader at p_index.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_shader_extra_data(const uint8_t *p_bytes, uint32_t p_index) {
	return 0;
}
93
94
// Default hook: parse extra bytes at the very end of the container.
// Returns the number of extra bytes consumed; the base format has none.
uint32_t RenderingShaderContainer::_from_bytes_footer_extra_data(const uint8_t *p_bytes) {
	return 0;
}
97
98
// Default hook: write extra bytes after the container header. Called by to_bytes()
// with nullptr first to query the size, then with the destination pointer.
// Returns the number of extra bytes written; the base format writes none.
uint32_t RenderingShaderContainer::_to_bytes_header_extra_data(uint8_t *) const {
	return 0;
}
101
102
// Default hook: write extra bytes after the reflection data block (nullptr = size
// query). Returns the number of extra bytes written; the base format writes none.
uint32_t RenderingShaderContainer::_to_bytes_reflection_extra_data(uint8_t *) const {
	return 0;
}
105
106
// Default hook: write extra bytes for the uniform binding record at the given index
// (nullptr = size query). Returns the number of extra bytes written; the base format
// writes none.
uint32_t RenderingShaderContainer::_to_bytes_reflection_binding_uniform_extra_data(uint8_t *, uint32_t) const {
	return 0;
}
109
110
// Default hook: write extra bytes for the specialization constant record at the given
// index (nullptr = size query). Returns the number of extra bytes written; the base
// format writes none.
uint32_t RenderingShaderContainer::_to_bytes_reflection_specialization_extra_data(uint8_t *, uint32_t) const {
	return 0;
}
113
114
// Default hook: write extra bytes for the shader at the given index (nullptr = size
// query). Returns the number of extra bytes written; the base format writes none.
uint32_t RenderingShaderContainer::_to_bytes_shader_extra_data(uint8_t *, uint32_t) const {
	return 0;
}
117
118
// Default hook: write extra bytes at the very end of the container (nullptr = size
// query). Returns the number of extra bytes written; the base format writes none.
uint32_t RenderingShaderContainer::_to_bytes_footer_extra_data(uint8_t *) const {
	return 0;
}
121
122
// Default hook called at the end of set_from_shader_reflection() so format
// subclasses can capture additional reflection state. Base implementation is a no-op.
void RenderingShaderContainer::_set_from_shader_reflection_post(const RenderingDeviceCommons::ShaderReflection &p_reflection) {
	// Do nothing.
}
125
126
// Runs SPIRV-Reflect over every submitted shader stage and merges the results
// into a single RDC::ShaderReflection: uniform sets (deduplicated across stages),
// specialization constants, vertex input / fragment output masks, compute local
// size, multiview usage and the push constant block. The populated reflection is
// stored in this container via set_from_shader_reflection().
//
// p_shader_name: debug name stored (UTF-8) alongside the container.
// p_spirv: one SPIR-V blob per stage; a compute stage must be the only stage.
// r_refl: receives one ReflectedShaderStage per input stage, each owning a
//         parsed SpvReflectShaderModule that aliases the stage's SPIR-V bytes.
// Returns OK on success, FAILED if reflection fails or stages are inconsistent.
Error RenderingShaderContainer::reflect_spirv(const String &p_shader_name, Span<RenderingDeviceCommons::ShaderStageSPIRVData> p_spirv, LocalVector<ReflectedShaderStage> &r_refl) {
	using RDC = RenderingDeviceCommons;
	RDC::ShaderReflection reflection;

	shader_name = p_shader_name.utf8();

	// Fixed: removed a stray "+ 0" that had no effect on the stage count.
	const uint32_t spirv_size = p_spirv.size();
	r_refl.resize(spirv_size);

	for (uint32_t i = 0; i < spirv_size; i++) {
		RDC::ShaderStage stage = p_spirv[i].shader_stage;
		RDC::ShaderStage stage_flag = (RDC::ShaderStage)(1 << p_spirv[i].shader_stage);
		r_refl[i].shader_stage = p_spirv[i].shader_stage;
		r_refl[i]._spirv_data = p_spirv[i].spirv;

		if (p_spirv[i].shader_stage == RDC::SHADER_STAGE_COMPUTE) {
			reflection.is_compute = true;
			ERR_FAIL_COND_V_MSG(spirv_size != 1, FAILED,
					"Compute shaders can only receive one stage, dedicated to compute.");
		}
		ERR_FAIL_COND_V_MSG(reflection.stages_bits.has_flag(stage_flag), FAILED,
				"Stage " + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + " submitted more than once.");

		{
			SpvReflectShaderModule &module = *r_refl.ptr()[i]._module;
			const uint8_t *spirv = p_spirv[i].spirv.ptr();
			// NO_COPY: the module aliases _spirv_data, which stays alive for the
			// lifetime of the ReflectedShaderStage.
			SpvReflectResult result = spvReflectCreateShaderModule2(SPV_REFLECT_MODULE_FLAG_NO_COPY, p_spirv[i].spirv.size(), spirv, &module);
			ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
					"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed parsing shader.");

			// Multiview is flagged by the capability here, or by a ViewIndex input below.
			for (uint32_t j = 0; j < module.capability_count; j++) {
				if (module.capabilities[j].value == SpvCapabilityMultiView) {
					reflection.has_multiview = true;
					break;
				}
			}

			if (reflection.is_compute) {
				reflection.compute_local_size[0] = module.entry_points->local_size.x;
				reflection.compute_local_size[1] = module.entry_points->local_size.y;
				reflection.compute_local_size[2] = module.entry_points->local_size.z;
			}
			uint32_t binding_count = 0;
			result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, nullptr);
			ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
					"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating descriptor bindings.");

			if (binding_count > 0) {
				// Parse bindings.

				Vector<SpvReflectDescriptorBinding *> bindings;
				bindings.resize(binding_count);
				result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, bindings.ptrw());

				ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed getting descriptor bindings.");

				for (uint32_t j = 0; j < binding_count; j++) {
					const SpvReflectDescriptorBinding &binding = *bindings[j];

					RDC::ShaderUniform uniform;

					bool need_array_dimensions = false;
					bool need_block_size = false;
					bool may_be_writable = false;

					// Map the SPIR-V descriptor type to the RD uniform type; unsupported
					// descriptor kinds are reported and skipped.
					switch (binding.descriptor_type) {
						case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER: {
							uniform.type = RDC::UNIFORM_TYPE_SAMPLER;
							need_array_dimensions = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
							uniform.type = RDC::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
							need_array_dimensions = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
							uniform.type = RDC::UNIFORM_TYPE_TEXTURE;
							need_array_dimensions = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
							uniform.type = RDC::UNIFORM_TYPE_IMAGE;
							need_array_dimensions = true;
							may_be_writable = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
							uniform.type = RDC::UNIFORM_TYPE_TEXTURE_BUFFER;
							need_array_dimensions = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
							uniform.type = RDC::UNIFORM_TYPE_IMAGE_BUFFER;
							need_array_dimensions = true;
							may_be_writable = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
							uniform.type = RDC::UNIFORM_TYPE_UNIFORM_BUFFER;
							need_block_size = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
							uniform.type = RDC::UNIFORM_TYPE_STORAGE_BUFFER;
							need_block_size = true;
							may_be_writable = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
							ERR_PRINT("Dynamic uniform buffer not supported.");
							continue;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
							ERR_PRINT("Dynamic storage buffer not supported.");
							continue;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
							uniform.type = RDC::UNIFORM_TYPE_INPUT_ATTACHMENT;
							need_array_dimensions = true;
						} break;
						case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
							ERR_PRINT("Acceleration structure not supported.");
							continue;
						} break;
					}

					if (need_array_dimensions) {
						// length = product of all array dimensions; 1 for a non-array binding.
						if (binding.array.dims_count == 0) {
							uniform.length = 1;
						} else {
							for (uint32_t k = 0; k < binding.array.dims_count; k++) {
								if (k == 0) {
									uniform.length = binding.array.dims[0];
								} else {
									uniform.length *= binding.array.dims[k];
								}
							}
						}

					} else if (need_block_size) {
						uniform.length = binding.block.size;
					} else {
						uniform.length = 0;
					}

					if (may_be_writable) {
						// Storage images only carry the binding-level decoration; buffers can
						// be marked read-only at the binding or at the block.
						if (binding.descriptor_type == SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
							uniform.writable = !(binding.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
						} else {
							uniform.writable = !(binding.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) && !(binding.block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
						}
					} else {
						uniform.writable = false;
					}

					uniform.binding = binding.binding;
					uint32_t set = binding.set;

					ERR_FAIL_COND_V_MSG(set >= RDC::MAX_UNIFORM_SETS, FAILED,
							"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(RDC::MAX_UNIFORM_SETS) + ").");

					if (set < (uint32_t)reflection.uniform_sets.size()) {
						// Check if this already exists.
						bool exists = false;
						for (int k = 0; k < reflection.uniform_sets[set].size(); k++) {
							if (reflection.uniform_sets[set][k].binding == uniform.binding) {
								// Already exists, verify that it's the same type.
								ERR_FAIL_COND_V_MSG(reflection.uniform_sets[set][k].type != uniform.type, FAILED,
										"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform type.");

								// Also, verify that it's the same size.
								ERR_FAIL_COND_V_MSG(reflection.uniform_sets[set][k].length != uniform.length, FAILED,
										"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform size.");

								// Also, verify that it has the same writability.
								ERR_FAIL_COND_V_MSG(reflection.uniform_sets[set][k].writable != uniform.writable, FAILED,
										"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different writability.");

								// Just append stage mask and return.
								reflection.uniform_sets.write[set].write[k].stages.set_flag(stage_flag);
								exists = true;
								break;
							}
						}

						if (exists) {
							continue; // Merged.
						}
					}

					uniform.stages.set_flag(stage_flag);

					if (set >= (uint32_t)reflection.uniform_sets.size()) {
						reflection.uniform_sets.resize(set + 1);
					}

					reflection.uniform_sets.write[set].push_back(uniform);
				}
			}

			{
				// Specialization constants.

				uint32_t sc_count = 0;
				result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, nullptr);
				ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating specialization constants.");

				if (sc_count) {
					Vector<SpvReflectSpecializationConstant *> spec_constants;
					spec_constants.resize(sc_count);

					result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, spec_constants.ptrw());
					ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
							"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining specialization constants.");

					for (uint32_t j = 0; j < sc_count; j++) {
						int32_t existing = -1;
						RDC::ShaderSpecializationConstant sconst;
						SpvReflectSpecializationConstant *spc = spec_constants[j];

						sconst.constant_id = spc->constant_id;
						sconst.int_value = 0; // Clear previous value JIC.
						switch (spc->constant_type) {
							case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: {
								sconst.type = RDC::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
								sconst.bool_value = spc->default_value.int_bool_value != 0;
							} break;
							case SPV_REFLECT_SPECIALIZATION_CONSTANT_INT: {
								sconst.type = RDC::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
								sconst.int_value = spc->default_value.int_bool_value;
							} break;
							case SPV_REFLECT_SPECIALIZATION_CONSTANT_FLOAT: {
								sconst.type = RDC::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
								sconst.float_value = spc->default_value.float_value;
							} break;
						}
						sconst.stages.set_flag(stage_flag);

						// Merge with a constant of the same id from a previous stage; it must
						// agree in type and default value.
						for (int k = 0; k < reflection.specialization_constants.size(); k++) {
							if (reflection.specialization_constants[k].constant_id == sconst.constant_id) {
								ERR_FAIL_COND_V_MSG(reflection.specialization_constants[k].type != sconst.type, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their types differ.");
								ERR_FAIL_COND_V_MSG(reflection.specialization_constants[k].int_value != sconst.int_value, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their default values differ.");
								existing = k;
								break;
							}
						}

						if (existing >= 0) {
							reflection.specialization_constants.write[existing].stages.set_flag(stage_flag);
						} else {
							reflection.specialization_constants.push_back(sconst);
						}
					}

					reflection.specialization_constants.sort();
				}
			}

			if (stage == RDC::SHADER_STAGE_VERTEX || stage == RDC::SHADER_STAGE_FRAGMENT) {
				uint32_t iv_count = 0;
				result = spvReflectEnumerateInputVariables(&module, &iv_count, nullptr);
				ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating input variables.");

				if (iv_count) {
					Vector<SpvReflectInterfaceVariable *> input_vars;
					input_vars.resize(iv_count);

					result = spvReflectEnumerateInputVariables(&module, &iv_count, input_vars.ptrw());
					ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
							"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining input variables.");

					for (const SpvReflectInterfaceVariable *v : input_vars) {
						if (!v) {
							continue;
						}
						if (stage == RDC::SHADER_STAGE_VERTEX) {
							if (v->decoration_flags == 0) { // Regular input.
								reflection.vertex_input_mask |= (((uint64_t)1) << v->location);
							}
						}
						if (v->built_in == SpvBuiltInViewIndex) {
							reflection.has_multiview = true;
						}
					}
				}
			}

			if (stage == RDC::SHADER_STAGE_FRAGMENT) {
				uint32_t ov_count = 0;
				result = spvReflectEnumerateOutputVariables(&module, &ov_count, nullptr);
				ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating output variables.");

				if (ov_count) {
					Vector<SpvReflectInterfaceVariable *> output_vars;
					output_vars.resize(ov_count);

					result = spvReflectEnumerateOutputVariables(&module, &ov_count, output_vars.ptrw());
					ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
							"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining output variables.");

					for (const SpvReflectInterfaceVariable *refvar : output_vars) {
						if (!refvar) {
							continue;
						}
						// FragDepth is a built-in, not a color attachment output.
						if (refvar->built_in != SpvBuiltInFragDepth) {
							reflection.fragment_output_mask |= 1 << refvar->location;
						}
					}
				}
			}

			uint32_t pc_count = 0;
			result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, nullptr);
			ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
					"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating push constants.");

			if (pc_count) {
				ERR_FAIL_COND_V_MSG(pc_count > 1, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");

				Vector<SpvReflectBlockVariable *> pconstants;
				pconstants.resize(pc_count);
				result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, pconstants.ptrw());
				ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining push constants.");
#if 0
				if (pconstants[0] == nullptr) {
					Ref<FileAccess> f = FileAccess::open("res://popo.spv", FileAccess::WRITE);
					f->store_buffer((const uint8_t *)&SpirV[0], SpirV.size() * sizeof(uint32_t));
				}
#endif

				ERR_FAIL_COND_V_MSG(reflection.push_constant_size && reflection.push_constant_size != pconstants[0]->size, FAILED,
						"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Push constant block must be the same across shader stages.");

				reflection.push_constant_size = pconstants[0]->size;
				reflection.push_constant_stages.set_flag(stage_flag);

				//print_line("Stage: " + String(RDC::SHADER_STAGE_NAMES[stage]) + " push constant of size=" + itos(push_constant.push_constant_size));
			}
		}

		reflection.stages_bits.set_flag(stage_flag);
	}

	// Sort all uniform_sets by binding.
	for (uint32_t i = 0; i < reflection.uniform_sets.size(); i++) {
		reflection.uniform_sets.write[i].sort();
	}

	set_from_shader_reflection(reflection);

	return OK;
}
477
478
// Flattens p_reflection into the serializable members of this container:
// scalar fields go into reflection_data, uniform bindings into one flat array
// plus a per-set count, specialization constants and present stages into their
// own arrays. Ends by giving subclasses a chance to capture extra state.
void RenderingShaderContainer::set_from_shader_reflection(const RenderingDeviceCommons::ShaderReflection &p_reflection) {
	reflection_binding_set_uniforms_count.clear();
	reflection_binding_set_uniforms_data.clear();
	reflection_specialization_data.clear();
	reflection_shader_stages.clear();

	// Copy the scalar reflection fields verbatim.
	reflection_data.vertex_input_mask = p_reflection.vertex_input_mask;
	reflection_data.fragment_output_mask = p_reflection.fragment_output_mask;
	reflection_data.specialization_constants_count = p_reflection.specialization_constants.size();
	reflection_data.is_compute = p_reflection.is_compute;
	reflection_data.has_multiview = p_reflection.has_multiview;
	reflection_data.compute_local_size[0] = p_reflection.compute_local_size[0];
	reflection_data.compute_local_size[1] = p_reflection.compute_local_size[1];
	reflection_data.compute_local_size[2] = p_reflection.compute_local_size[2];
	reflection_data.set_count = p_reflection.uniform_sets.size();
	reflection_data.push_constant_size = p_reflection.push_constant_size;
	reflection_data.push_constant_stages_mask = uint32_t(p_reflection.push_constant_stages);
	reflection_data.shader_name_len = shader_name.length();

	// Uniform sets: records are appended in set order, followed by the set's count.
	for (const Vector<RenderingDeviceCommons::ShaderUniform> &set_uniforms : p_reflection.uniform_sets) {
		for (const RenderingDeviceCommons::ShaderUniform &set_uniform : set_uniforms) {
			ReflectionBindingData flattened;
			flattened.type = uint32_t(set_uniform.type);
			flattened.binding = set_uniform.binding;
			flattened.stages = uint32_t(set_uniform.stages);
			flattened.length = set_uniform.length;
			flattened.writable = uint32_t(set_uniform.writable);
			reflection_binding_set_uniforms_data.push_back(flattened);
		}

		reflection_binding_set_uniforms_count.push_back(set_uniforms.size());
	}

	// Specialization constants.
	for (const RenderingDeviceCommons::ShaderSpecializationConstant &constant : p_reflection.specialization_constants) {
		ReflectionSpecializationData serialized;
		serialized.type = uint32_t(constant.type);
		serialized.constant_id = constant.constant_id;
		serialized.int_value = constant.int_value;
		serialized.stage_flags = uint32_t(constant.stages);
		reflection_specialization_data.push_back(serialized);
	}

	// Record which stages are present, in ascending stage order.
	for (uint32_t stage_index = 0; stage_index < RenderingDeviceCommons::SHADER_STAGE_MAX; stage_index++) {
		if (p_reflection.stages_bits.has_flag(RenderingDeviceCommons::ShaderStage(1U << stage_index))) {
			reflection_shader_stages.push_back(RenderingDeviceCommons::ShaderStage(stage_index));
		}
	}

	reflection_data.stage_count = reflection_shader_stages.size();

	// Let derived containers capture any extra reflection state.
	_set_from_shader_reflection_post(p_reflection);
}
530
531
bool RenderingShaderContainer::set_code_from_spirv(const String &p_shader_name, Span<RenderingDeviceCommons::ShaderStageSPIRVData> p_spirv) {
532
LocalVector<ReflectedShaderStage> spirv;
533
ERR_FAIL_COND_V(reflect_spirv(p_shader_name, p_spirv, spirv) != OK, false);
534
return _set_code_from_spirv(spirv.span());
535
}
536
537
// Rebuilds a ShaderReflection structure from the flattened data stored in this
// container (inverse of set_from_shader_reflection()). Note: uniform stage masks
// merged during reflection are restored as-is from the stored records.
RenderingDeviceCommons::ShaderReflection RenderingShaderContainer::get_shader_reflection() const {
	RenderingDeviceCommons::ShaderReflection shader_refl;
	shader_refl.push_constant_size = reflection_data.push_constant_size;
	shader_refl.push_constant_stages = reflection_data.push_constant_stages_mask;
	shader_refl.vertex_input_mask = reflection_data.vertex_input_mask;
	shader_refl.fragment_output_mask = reflection_data.fragment_output_mask;
	shader_refl.is_compute = reflection_data.is_compute;
	shader_refl.has_multiview = reflection_data.has_multiview;
	shader_refl.compute_local_size[0] = reflection_data.compute_local_size[0];
	shader_refl.compute_local_size[1] = reflection_data.compute_local_size[1];
	shader_refl.compute_local_size[2] = reflection_data.compute_local_size[2];
	// All three arrays are sized once here; the previous code redundantly resized
	// specialization_constants and stages_vector a second time further down.
	shader_refl.uniform_sets.resize(reflection_data.set_count);
	shader_refl.specialization_constants.resize(reflection_data.specialization_constants_count);
	shader_refl.stages_vector.resize(reflection_data.stage_count);

	DEV_ASSERT(reflection_binding_set_uniforms_count.size() == reflection_data.set_count && "The amount of elements in the reflection and the shader container can't be different.");

	// Expand the flat uniform record array back into per-set vectors.
	uint32_t uniform_index = 0;
	for (uint32_t i = 0; i < reflection_data.set_count; i++) {
		Vector<RenderingDeviceCommons::ShaderUniform> &uniform_set = shader_refl.uniform_sets.ptrw()[i];
		uint32_t uniforms_count = reflection_binding_set_uniforms_count[i];
		uniform_set.resize(uniforms_count);
		for (uint32_t j = 0; j < uniforms_count; j++) {
			const ReflectionBindingData &binding = reflection_binding_set_uniforms_data[uniform_index++];
			RenderingDeviceCommons::ShaderUniform &uniform = uniform_set.ptrw()[j];
			uniform.type = RenderingDeviceCommons::UniformType(binding.type);
			uniform.writable = binding.writable;
			uniform.length = binding.length;
			uniform.binding = binding.binding;
			uniform.stages = binding.stages;
		}
	}

	// Specialization constants.
	for (uint32_t i = 0; i < reflection_data.specialization_constants_count; i++) {
		const ReflectionSpecializationData &spec = reflection_specialization_data[i];
		RenderingDeviceCommons::ShaderSpecializationConstant &sc = shader_refl.specialization_constants.ptrw()[i];
		sc.type = RenderingDeviceCommons::PipelineSpecializationConstantType(spec.type);
		sc.constant_id = spec.constant_id;
		sc.int_value = spec.int_value;
		sc.stages = spec.stage_flags;
	}

	// Rebuild both the ordered stage list and the stage bitmask.
	for (uint32_t i = 0; i < reflection_data.stage_count; i++) {
		shader_refl.stages_vector.set(i, reflection_shader_stages[i]);
		shader_refl.stages_bits.set_flag(RenderingDeviceCommons::ShaderStage(1U << reflection_shader_stages[i]));
	}

	return shader_refl;
}
587
588
// Deserializes the container from p_bytes (inverse of to_bytes()).
// Layout: ContainerHeader [+extra] | ReflectionData [+extra] | shader name
// (padded to 4 bytes) | per-set uniform counts and records | specialization
// records | stage list | per-shader header + compressed code (padded).
// Every read is bounds-checked; returns false with an error message on any
// truncated or malformed input, true when the whole buffer was consumed.
bool RenderingShaderContainer::from_bytes(const PackedByteArray &p_bytes) {
	const uint64_t alignment = sizeof(uint32_t);
	const uint8_t *bytes_ptr = p_bytes.ptr();
	uint64_t bytes_offset = 0;

	// Read container header.
	ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ContainerHeader)) > p_bytes.size(), false, "Not enough bytes for a container header in shader container.");
	const ContainerHeader &container_header = *(const ContainerHeader *)(&bytes_ptr[bytes_offset]);
	bytes_offset += sizeof(ContainerHeader);
	// Subclass hooks return how many extra bytes they consumed.
	bytes_offset += _from_bytes_header_extra_data(&bytes_ptr[bytes_offset]);

	ERR_FAIL_COND_V_MSG(container_header.magic_number != CONTAINER_MAGIC_NUMBER, false, "Incorrect magic number in shader container.");
	ERR_FAIL_COND_V_MSG(container_header.version > CONTAINER_VERSION, false, "Unsupported version in shader container.");
	ERR_FAIL_COND_V_MSG(container_header.format != _format(), false, "Incorrect format in shader container.");
	ERR_FAIL_COND_V_MSG(container_header.format_version > _format_version(), false, "Unsupported format version in shader container.");

	// Adjust shaders to the size indicated by the container header.
	shaders.resize(container_header.shader_count);

	// Read reflection data.
	ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ReflectionData)) > p_bytes.size(), false, "Not enough bytes for reflection data in shader container.");
	reflection_data = *(const ReflectionData *)(&bytes_ptr[bytes_offset]);
	bytes_offset += sizeof(ReflectionData);
	bytes_offset += _from_bytes_reflection_extra_data(&bytes_ptr[bytes_offset]);

	// Read shader name.
	ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + reflection_data.shader_name_len) > p_bytes.size(), false, "Not enough bytes for shader name in shader container.");
	if (reflection_data.shader_name_len > 0) {
		String shader_name_str;
		shader_name_str.append_utf8((const char *)(&bytes_ptr[bytes_offset]), reflection_data.shader_name_len);
		shader_name = shader_name_str.utf8();
		// The name is padded so the next field starts 4-byte aligned.
		bytes_offset = aligned_to(bytes_offset + reflection_data.shader_name_len, alignment);
	} else {
		shader_name = CharString();
	}

	reflection_binding_set_uniforms_count.resize(reflection_data.set_count);
	reflection_binding_set_uniforms_data.clear();

	// Uniform sets are flattened: per set, a uint32 count followed by that many records.
	uint32_t uniform_index = 0;
	for (uint32_t i = 0; i < reflection_data.set_count; i++) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(uint32_t)) > p_bytes.size(), false, "Not enough bytes for uniform set count in shader container.");
		uint32_t uniforms_count = *(uint32_t *)(&bytes_ptr[bytes_offset]);
		reflection_binding_set_uniforms_count.ptrw()[i] = uniforms_count;
		bytes_offset += sizeof(uint32_t);

		reflection_binding_set_uniforms_data.resize(reflection_binding_set_uniforms_data.size() + uniforms_count);
		bytes_offset += _from_bytes_reflection_binding_uniform_extra_data_start(&bytes_ptr[bytes_offset]);

		for (uint32_t j = 0; j < uniforms_count; j++) {
			ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ReflectionBindingData)) > p_bytes.size(), false, "Not enough bytes for uniform in shader container.");
			memcpy(&reflection_binding_set_uniforms_data.ptrw()[uniform_index], &bytes_ptr[bytes_offset], sizeof(ReflectionBindingData));
			bytes_offset += sizeof(ReflectionBindingData);
			bytes_offset += _from_bytes_reflection_binding_uniform_extra_data(&bytes_ptr[bytes_offset], uniform_index);
			uniform_index++;
		}
	}

	// Specialization constants.
	reflection_specialization_data.resize(reflection_data.specialization_constants_count);
	bytes_offset += _from_bytes_reflection_specialization_extra_data_start(&bytes_ptr[bytes_offset]);

	for (uint32_t i = 0; i < reflection_data.specialization_constants_count; i++) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ReflectionSpecializationData)) > p_bytes.size(), false, "Not enough bytes for specialization in shader container.");
		memcpy(&reflection_specialization_data.ptrw()[i], &bytes_ptr[bytes_offset], sizeof(ReflectionSpecializationData));
		bytes_offset += sizeof(ReflectionSpecializationData);
		bytes_offset += _from_bytes_reflection_specialization_extra_data(&bytes_ptr[bytes_offset], i);
	}

	// Stage list (one ShaderStage value per present stage).
	const uint32_t stage_count = reflection_data.stage_count;
	if (stage_count > 0) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + stage_count * sizeof(RenderingDeviceCommons::ShaderStage)) > p_bytes.size(), false, "Not enough bytes for stages in shader container.");
		reflection_shader_stages.resize(stage_count);
		bytes_offset += _from_bytes_shader_extra_data_start(&bytes_ptr[bytes_offset]);
		memcpy(reflection_shader_stages.ptrw(), &bytes_ptr[bytes_offset], stage_count * sizeof(RenderingDeviceCommons::ShaderStage));
		bytes_offset += stage_count * sizeof(RenderingDeviceCommons::ShaderStage);
	}

	// Read shaders.
	for (int64_t i = 0; i < shaders.size(); i++) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ShaderHeader)) > p_bytes.size(), false, "Not enough bytes for shader header in shader container.");
		const ShaderHeader &header = *(const ShaderHeader *)(&bytes_ptr[bytes_offset]);
		bytes_offset += sizeof(ShaderHeader);

		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + header.code_compressed_size) > p_bytes.size(), false, "Not enough bytes for a shader in shader container.");
		Shader &shader = shaders.ptrw()[i];
		shader.shader_stage = RenderingDeviceCommons::ShaderStage(header.shader_stage);
		shader.code_compression_flags = header.code_compression_flags;
		shader.code_decompressed_size = header.code_decompressed_size;
		shader.code_compressed_bytes.resize(header.code_compressed_size);
		memcpy(shader.code_compressed_bytes.ptrw(), &bytes_ptr[bytes_offset], header.code_compressed_size);
		// Compressed code is padded so the next shader header starts 4-byte aligned.
		bytes_offset = aligned_to(bytes_offset + header.code_compressed_size, alignment);
		bytes_offset += _from_bytes_shader_extra_data(&bytes_ptr[bytes_offset], i);
	}

	bytes_offset += _from_bytes_footer_extra_data(&bytes_ptr[bytes_offset]);

	// The entire buffer must have been consumed, otherwise the container is malformed.
	ERR_FAIL_COND_V_MSG(bytes_offset != (uint64_t)p_bytes.size(), false, "Amount of bytes in the container does not match the amount of bytes read.");
	return true;
}
687
688
// Serializes the container into a single PackedByteArray.
// Layout: ContainerHeader (+ format-specific header extra data), ReflectionData
// (+ extra data), the shader name padded to 4-byte alignment, per-set uniform
// counts with their uniform records, specialization constants, shader stages,
// then one ShaderHeader plus compressed code blob per shader (each padded to
// alignment), and finally footer extra data.
// A first pass computes the exact total size (the _to_bytes_*_extra_data hooks
// appear to report their size when given nullptr — consistent with how both
// passes accumulate their return value); the second pass writes the bytes and
// must land on exactly the reserved size.
PackedByteArray RenderingShaderContainer::to_bytes() const {
	// Compute the exact size the container will require for writing everything out.
	const uint64_t alignment = sizeof(uint32_t);
	uint64_t total_size = 0;
	total_size += sizeof(ContainerHeader) + _to_bytes_header_extra_data(nullptr);
	total_size += sizeof(ReflectionData) + _to_bytes_reflection_extra_data(nullptr);
	// Shader name is padded up to the next 4-byte boundary (aligned_to(0, 4) is 0,
	// so a missing name contributes nothing).
	total_size += aligned_to(reflection_data.shader_name_len, alignment);
	total_size += reflection_binding_set_uniforms_count.size() * sizeof(uint32_t);
	total_size += reflection_binding_set_uniforms_data.size() * sizeof(ReflectionBindingData);
	total_size += reflection_specialization_data.size() * sizeof(ReflectionSpecializationData);
	total_size += reflection_shader_stages.size() * sizeof(RenderingDeviceCommons::ShaderStage);

	// Per-element extra data contributed by the format-specific subclass.
	for (uint32_t i = 0; i < reflection_binding_set_uniforms_data.size(); i++) {
		total_size += _to_bytes_reflection_binding_uniform_extra_data(nullptr, i);
	}

	for (uint32_t i = 0; i < reflection_specialization_data.size(); i++) {
		total_size += _to_bytes_reflection_specialization_extra_data(nullptr, i);
	}

	for (uint32_t i = 0; i < shaders.size(); i++) {
		total_size += sizeof(ShaderHeader);
		total_size += shaders[i].code_compressed_bytes.size();
		// Align after each code blob, mirroring the aligned_to() in the write pass.
		total_size = aligned_to(total_size, alignment);
		total_size += _to_bytes_shader_extra_data(nullptr, i);
	}

	total_size += _to_bytes_footer_extra_data(nullptr);

	// Create the array that will hold all of the data.
	// resize_initialized() zero-fills, so alignment padding bytes are deterministic.
	PackedByteArray bytes;
	bytes.resize_initialized(total_size);

	// Write out the data to the array.
	uint64_t bytes_offset = 0;
	uint8_t *bytes_ptr = bytes.ptrw();
	// NOTE(review): headers are written by casting raw bytes to the struct type;
	// assumes ContainerHeader/ShaderHeader are trivially copyable with no padding
	// surprises across platforms — presumably guaranteed by their declarations.
	ContainerHeader &container_header = *(ContainerHeader *)(&bytes_ptr[bytes_offset]);
	container_header.magic_number = CONTAINER_MAGIC_NUMBER;
	container_header.version = CONTAINER_VERSION;
	container_header.format = _format();
	container_header.format_version = _format_version();
	container_header.shader_count = shaders.size();
	bytes_offset += sizeof(ContainerHeader);
	bytes_offset += _to_bytes_header_extra_data(&bytes_ptr[bytes_offset]);

	memcpy(&bytes_ptr[bytes_offset], &reflection_data, sizeof(ReflectionData));
	bytes_offset += sizeof(ReflectionData);
	bytes_offset += _to_bytes_reflection_extra_data(&bytes_ptr[bytes_offset]);

	if (shader_name.size() > 0) {
		// Write shader_name_len bytes, then advance to the next aligned offset.
		memcpy(&bytes_ptr[bytes_offset], shader_name.ptr(), reflection_data.shader_name_len);
		bytes_offset = aligned_to(bytes_offset + reflection_data.shader_name_len, alignment);
	}

	// Uniforms are stored flat in reflection_binding_set_uniforms_data; each set
	// is serialized as its count followed by that many uniform records.
	uint32_t uniform_index = 0;
	for (uint32_t uniform_count : reflection_binding_set_uniforms_count) {
		memcpy(&bytes_ptr[bytes_offset], &uniform_count, sizeof(uniform_count));
		bytes_offset += sizeof(uint32_t);

		for (uint32_t i = 0; i < uniform_count; i++) {
			memcpy(&bytes_ptr[bytes_offset], &reflection_binding_set_uniforms_data[uniform_index], sizeof(ReflectionBindingData));
			bytes_offset += sizeof(ReflectionBindingData);
			bytes_offset += _to_bytes_reflection_binding_uniform_extra_data(&bytes_ptr[bytes_offset], uniform_index);
			uniform_index++;
		}
	}

	for (uint32_t i = 0; i < reflection_specialization_data.size(); i++) {
		memcpy(&bytes_ptr[bytes_offset], &reflection_specialization_data.ptr()[i], sizeof(ReflectionSpecializationData));
		bytes_offset += sizeof(ReflectionSpecializationData);
		bytes_offset += _to_bytes_reflection_specialization_extra_data(&bytes_ptr[bytes_offset], i);
	}

	if (!reflection_shader_stages.is_empty()) {
		uint32_t stage_count = reflection_shader_stages.size();
		memcpy(&bytes_ptr[bytes_offset], reflection_shader_stages.ptr(), stage_count * sizeof(RenderingDeviceCommons::ShaderStage));
		bytes_offset += stage_count * sizeof(RenderingDeviceCommons::ShaderStage);
	}

	for (uint32_t i = 0; i < shaders.size(); i++) {
		const Shader &shader = shaders[i];
		ShaderHeader &header = *(ShaderHeader *)(&bytes.ptr()[bytes_offset]);
		header.shader_stage = shader.shader_stage;
		header.code_compressed_size = uint32_t(shader.code_compressed_bytes.size());
		header.code_compression_flags = shader.code_compression_flags;
		header.code_decompressed_size = shader.code_decompressed_size;
		bytes_offset += sizeof(ShaderHeader);
		memcpy(&bytes.ptrw()[bytes_offset], shader.code_compressed_bytes.ptr(), shader.code_compressed_bytes.size());
		// Pad past the code blob before the per-shader extra data, matching the
		// size pass above.
		bytes_offset = aligned_to(bytes_offset + shader.code_compressed_bytes.size(), alignment);
		bytes_offset += _to_bytes_shader_extra_data(&bytes_ptr[bytes_offset], i);
	}

	bytes_offset += _to_bytes_footer_extra_data(&bytes_ptr[bytes_offset]);

	// Both passes must agree byte-for-byte; a mismatch means a subclass hook
	// reported a different size than it wrote.
	ERR_FAIL_COND_V_MSG(bytes_offset != total_size, PackedByteArray(), "Amount of bytes written does not match the amount of bytes reserved for the container.");
	return bytes;
}
bool RenderingShaderContainer::compress_code(const uint8_t *p_decompressed_bytes, uint32_t p_decompressed_size, uint8_t *p_compressed_bytes, uint32_t *r_compressed_size, uint32_t *r_compressed_flags) const {
787
DEV_ASSERT(p_decompressed_bytes != nullptr);
788
DEV_ASSERT(p_decompressed_size > 0);
789
DEV_ASSERT(p_compressed_bytes != nullptr);
790
DEV_ASSERT(r_compressed_size != nullptr);
791
DEV_ASSERT(r_compressed_flags != nullptr);
792
793
*r_compressed_flags = 0;
794
795
PackedByteArray zstd_bytes;
796
const int64_t zstd_max_bytes = Compression::get_max_compressed_buffer_size(p_decompressed_size, Compression::MODE_ZSTD);
797
zstd_bytes.resize(zstd_max_bytes);
798
799
const int64_t zstd_size = Compression::compress(zstd_bytes.ptrw(), p_decompressed_bytes, p_decompressed_size, Compression::MODE_ZSTD);
800
if (zstd_size > 0 && (uint32_t)(zstd_size) < p_decompressed_size) {
801
// Only choose Zstd if it results in actual compression.
802
memcpy(p_compressed_bytes, zstd_bytes.ptr(), zstd_size);
803
*r_compressed_size = zstd_size;
804
*r_compressed_flags |= COMPRESSION_FLAG_ZSTD;
805
} else {
806
// Just copy the input to the output directly.
807
memcpy(p_compressed_bytes, p_decompressed_bytes, p_decompressed_size);
808
*r_compressed_size = p_decompressed_size;
809
}
810
811
return true;
812
}
bool RenderingShaderContainer::decompress_code(const uint8_t *p_compressed_bytes, uint32_t p_compressed_size, uint32_t p_compressed_flags, uint8_t *p_decompressed_bytes, uint32_t p_decompressed_size) const {
815
DEV_ASSERT(p_compressed_bytes != nullptr);
816
DEV_ASSERT(p_compressed_size > 0);
817
DEV_ASSERT(p_decompressed_bytes != nullptr);
818
DEV_ASSERT(p_decompressed_size > 0);
819
820
bool uses_zstd = p_compressed_flags & COMPRESSION_FLAG_ZSTD;
821
if (uses_zstd) {
822
if (!Compression::decompress(p_decompressed_bytes, p_decompressed_size, p_compressed_bytes, p_compressed_size, Compression::MODE_ZSTD)) {
823
ERR_FAIL_V_MSG(false, "Malformed zstd input for decompressing shader code.");
824
}
825
} else {
826
memcpy(p_decompressed_bytes, p_compressed_bytes, MIN(p_compressed_size, p_decompressed_size));
827
}
828
829
return true;
830
}
RenderingShaderContainer::RenderingShaderContainer() {}
RenderingShaderContainer::~RenderingShaderContainer() {}