GitHub Repository: godotengine/godot
Path: blob/master/servers/rendering/renderer_rd/shader_rd.cpp
/**************************************************************************/
/* shader_rd.cpp */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/

#include "shader_rd.h"

#include "core/io/dir_access.h"
#include "core/io/file_access.h"
#include "core/object/worker_thread_pool.h"
#include "core/version.h"
#include "servers/rendering/rendering_device.h"
#include "servers/rendering/shader_include_db.h"

#define ENABLE_SHADER_CACHE 1
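
// Splits a stage template into chunks. Directive lines (#VERSION_DEFINES,
// #GLOBALS, #MATERIAL_UNIFORMS, #CODE:<section>) become typed chunks that get
// filled in per version/variant later; built-in #include files are expanded
// inline so their lines are parsed as well; everything else is accumulated
// into verbatim text chunks.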
void ShaderRD::_add_stage(const char *p_code, StageType p_stage_type) {
	Vector<String> lines = String(p_code).split("\n");

	String text;

	int line_count = lines.size();
	for (int i = 0; i < line_count; i++) {
		const String &l = lines[i];
		bool push_chunk = false;

		StageTemplate::Chunk chunk;

		if (l.begins_with("#VERSION_DEFINES")) {
			chunk.type = StageTemplate::Chunk::TYPE_VERSION_DEFINES;
			push_chunk = true;
		} else if (l.begins_with("#GLOBALS")) {
			switch (p_stage_type) {
				case STAGE_TYPE_VERTEX:
					chunk.type = StageTemplate::Chunk::TYPE_VERTEX_GLOBALS;
					break;
				case STAGE_TYPE_FRAGMENT:
					chunk.type = StageTemplate::Chunk::TYPE_FRAGMENT_GLOBALS;
					break;
				case STAGE_TYPE_COMPUTE:
					chunk.type = StageTemplate::Chunk::TYPE_COMPUTE_GLOBALS;
					break;
				case STAGE_TYPE_RAYGEN:
					chunk.type = StageTemplate::Chunk::TYPE_RAYGEN_GLOBALS;
					break;
				case STAGE_TYPE_ANY_HIT:
					chunk.type = StageTemplate::Chunk::TYPE_ANY_HIT_GLOBALS;
					break;
				case STAGE_TYPE_CLOSEST_HIT:
					chunk.type = StageTemplate::Chunk::TYPE_CLOSEST_HIT_GLOBALS;
					break;
				case STAGE_TYPE_MISS:
					chunk.type = StageTemplate::Chunk::TYPE_MISS_GLOBALS;
					break;
				case STAGE_TYPE_INTERSECTION:
					chunk.type = StageTemplate::Chunk::TYPE_INTERSECTION_GLOBALS;
					break;
				default: {
				}
			}

			push_chunk = true;
		} else if (l.begins_with("#MATERIAL_UNIFORMS")) {
			chunk.type = StageTemplate::Chunk::TYPE_MATERIAL_UNIFORMS;
			push_chunk = true;
		} else if (l.begins_with("#CODE")) {
			chunk.type = StageTemplate::Chunk::TYPE_CODE;
			push_chunk = true;
			chunk.code = l.replace_first("#CODE", String()).remove_char(':').strip_edges().to_upper();
		} else if (l.begins_with("#include ")) {
			String include_file = l.replace("#include ", "").strip_edges();
			if (include_file[0] == '"') {
				int end_pos = include_file.find_char('"', 1);
				if (end_pos >= 0) {
					include_file = include_file.substr(1, end_pos - 1);

					String include_code = ShaderIncludeDB::get_built_in_include_file(include_file);
					if (!include_code.is_empty()) {
						// Add these lines into our parse list so we parse them as well.
						Vector<String> include_lines = include_code.split("\n");

						for (int j = include_lines.size() - 1; j >= 0; j--) {
							lines.insert(i + 1, include_lines[j]);
						}

						line_count = lines.size();
					} else {
						// Add it in as is.
						text += l + "\n";
					}
				} else {
					// Add it in as is.
					text += l + "\n";
				}
			} else {
				// Add it in as is.
				text += l + "\n";
			}
		} else {
			text += l + "\n";
		}

		if (push_chunk) {
			if (!text.is_empty()) {
				StageTemplate::Chunk text_chunk;
				text_chunk.type = StageTemplate::Chunk::TYPE_TEXT;
				text_chunk.text = text.utf8();
				stage_templates[p_stage_type].chunks.push_back(text_chunk);
				text = String();
			}
			stage_templates[p_stage_type].chunks.push_back(chunk);
		}
	}

	if (!text.is_empty()) {
		StageTemplate::Chunk text_chunk;
		text_chunk.type = StageTemplate::Chunk::TYPE_TEXT;
		text_chunk.text = text.utf8();
		stage_templates[p_stage_type].chunks.push_back(text_chunk);
		text = String();
	}
}
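
// Registers the stage templates for a rasterization or compute shader and
// derives base_sha256, which keys the on-disk shader cache to the engine
// version/hash, the template sources, and the SPIR-V debug-info setting.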
void ShaderRD::setup(const char *p_vertex_code, const char *p_fragment_code, const char *p_compute_code, const char *p_name) {
	name = p_name;

	if (p_compute_code) {
		_add_stage(p_compute_code, STAGE_TYPE_COMPUTE);
		pipeline_type = RD::PIPELINE_TYPE_COMPUTE;
	} else {
		pipeline_type = RD::PIPELINE_TYPE_RASTERIZATION;
		if (p_vertex_code) {
			_add_stage(p_vertex_code, STAGE_TYPE_VERTEX);
		}
		if (p_fragment_code) {
			_add_stage(p_fragment_code, STAGE_TYPE_FRAGMENT);
		}
	}

	StringBuilder tohash;
	tohash.append("[GodotVersionNumber]");
	tohash.append(GODOT_VERSION_NUMBER);
	tohash.append("[GodotVersionHash]");
	tohash.append(GODOT_VERSION_HASH);
	tohash.append("[Vertex]");
	tohash.append(p_vertex_code ? p_vertex_code : "");
	tohash.append("[Fragment]");
	tohash.append(p_fragment_code ? p_fragment_code : "");
	tohash.append("[Compute]");
	tohash.append(p_compute_code ? p_compute_code : "");
	tohash.append("[DebugInfo]");
	tohash.append(Engine::get_singleton()->is_generate_spirv_debug_info_enabled() ? "1" : "0");

	base_sha256 = tohash.as_string().sha256_text();
}

void ShaderRD::setup_raytracing(const char *p_raygen_code, const char *p_any_hit_code, const char *p_closest_hit_code, const char *p_miss_code, const char *p_intersection_code, const char *p_name) {
	name = p_name;

	pipeline_type = RD::PIPELINE_TYPE_RAYTRACING;
	if (p_raygen_code) {
		_add_stage(p_raygen_code, STAGE_TYPE_RAYGEN);
	}
	if (p_any_hit_code) {
		_add_stage(p_any_hit_code, STAGE_TYPE_ANY_HIT);
	}
	if (p_closest_hit_code) {
		_add_stage(p_closest_hit_code, STAGE_TYPE_CLOSEST_HIT);
	}
	if (p_miss_code) {
		_add_stage(p_miss_code, STAGE_TYPE_MISS);
	}
	if (p_intersection_code) {
		_add_stage(p_intersection_code, STAGE_TYPE_INTERSECTION);
	}

	StringBuilder tohash;
	tohash.append("[GodotVersionNumber]");
	tohash.append(GODOT_VERSION_NUMBER);
	tohash.append("[GodotVersionHash]");
	tohash.append(GODOT_VERSION_HASH);
	tohash.append("[Raygen]");
	tohash.append(p_raygen_code ? p_raygen_code : "");
	tohash.append("[AnyHit]");
	tohash.append(p_any_hit_code ? p_any_hit_code : "");
	tohash.append("[ClosestHit]");
	tohash.append(p_closest_hit_code ? p_closest_hit_code : "");
	tohash.append("[Miss]");
	tohash.append(p_miss_code ? p_miss_code : "");
	tohash.append("[Intersection]");
	tohash.append(p_intersection_code ? p_intersection_code : "");
	tohash.append("[DebugInfo]");
	tohash.append(Engine::get_singleton()->is_generate_spirv_debug_info_enabled() ? "1" : "0");

	base_sha256 = tohash.as_string().sha256_text();
}
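
// Versions are per-material instances of this shader, handed out as RIDs.
// A new version starts out dirty; actual variant compilation is deferred
// until its code is set or its validity is first queried.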

RID ShaderRD::version_create(bool p_embedded) {
	//initialize() was never called
	ERR_FAIL_COND_V(group_to_variant_map.is_empty(), RID());

	Version version;
	version.dirty = true;
	version.valid = false;
	version.initialize_needed = true;
	version.embedded = p_embedded;
	version.variants.clear();
	version.variant_data.clear();

	version.mutex = memnew(Mutex);
	RID rid = version_owner.make_rid(version);
	{
		MutexLock lock(versions_mutex);
		version_mutexes.insert(rid, version.mutex);
	}

	if (p_embedded) {
		MutexLock lock(shader_versions_embedded_set_mutex);
		shader_versions_embedded_set.insert({ this, rid });
	}

	return rid;
}

void ShaderRD::_initialize_version(Version *p_version) {
	_clear_version(p_version);

	p_version->valid = false;
	p_version->dirty = false;

	p_version->variants.resize_initialized(variant_defines.size());
	p_version->variant_data.resize(variant_defines.size());
	p_version->group_compilation_tasks.resize_initialized(group_enabled.size());
}

void ShaderRD::_clear_version(Version *p_version) {
	_compile_ensure_finished(p_version);

	// Clear versions if they exist.
	if (!p_version->variants.is_empty()) {
		for (int i = 0; i < variant_defines.size(); i++) {
			if (p_version->variants[i].is_valid()) {
				RD::get_singleton()->free_rid(p_version->variants[i]);
			}
		}

		p_version->variants.clear();
		p_version->variant_data.clear();
	}
}
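
// Stitches the full GLSL source for one variant by walking the template
// chunks: version defines (general + variant + per-version custom), material
// uniforms, per-stage globals, named #CODE sections, and verbatim text.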

void ShaderRD::_build_variant_code(StringBuilder &builder, uint32_t p_variant, const Version *p_version, const StageTemplate &p_template) {
	for (const StageTemplate::Chunk &chunk : p_template.chunks) {
		switch (chunk.type) {
			case StageTemplate::Chunk::TYPE_VERSION_DEFINES: {
				builder.append("\n"); //make sure defines begin at newline
				builder.append(general_defines.get_data());
				builder.append(variant_defines[p_variant].text.get_data());
				for (int j = 0; j < p_version->custom_defines.size(); j++) {
					builder.append(p_version->custom_defines[j].get_data());
				}
				builder.append("\n"); //make sure defines begin at newline
				if (p_version->uniforms.size()) {
					builder.append("#define MATERIAL_UNIFORMS_USED\n");
				}
				for (const KeyValue<StringName, CharString> &E : p_version->code_sections) {
					builder.append(String("#define ") + String(E.key) + "_CODE_USED\n");
				}
				builder.append(String("#define RENDER_DRIVER_") + OS::get_singleton()->get_current_rendering_driver_name().to_upper() + "\n");
				builder.append("#define samplerExternalOES sampler2D\n");
				builder.append("#define textureExternalOES texture2D\n");
			} break;
			case StageTemplate::Chunk::TYPE_MATERIAL_UNIFORMS: {
				builder.append(p_version->uniforms.get_data()); //uniforms (same for vertex and fragment)
			} break;
			case StageTemplate::Chunk::TYPE_VERTEX_GLOBALS: {
				builder.append(p_version->vertex_globals.get_data()); // vertex globals
			} break;
			case StageTemplate::Chunk::TYPE_FRAGMENT_GLOBALS: {
				builder.append(p_version->fragment_globals.get_data()); // fragment globals
			} break;
			case StageTemplate::Chunk::TYPE_COMPUTE_GLOBALS: {
				builder.append(p_version->compute_globals.get_data()); // compute globals
			} break;
			case StageTemplate::Chunk::TYPE_RAYGEN_GLOBALS: {
				builder.append(p_version->raygen_globals.get_data()); // raygen globals
			} break;
			case StageTemplate::Chunk::TYPE_ANY_HIT_GLOBALS: {
				builder.append(p_version->any_hit_globals.get_data()); // any_hit globals
			} break;
			case StageTemplate::Chunk::TYPE_CLOSEST_HIT_GLOBALS: {
				builder.append(p_version->closest_hit_globals.get_data()); // closest_hit globals
			} break;
			case StageTemplate::Chunk::TYPE_MISS_GLOBALS: {
				builder.append(p_version->miss_globals.get_data()); // miss globals
			} break;
			case StageTemplate::Chunk::TYPE_INTERSECTION_GLOBALS: {
				builder.append(p_version->intersection_globals.get_data()); // intersection globals
			} break;
			case StageTemplate::Chunk::TYPE_CODE: {
				if (p_version->code_sections.has(chunk.code)) {
					builder.append(p_version->code_sections[chunk.code].get_data());
				}
			} break;
			case StageTemplate::Chunk::TYPE_TEXT: {
				builder.append(chunk.text.get_data());
			} break;
		}
	}
}
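
// Builds the per-stage source strings for one variant, indexed by
// RD::ShaderStage; stages that don't apply to the pipeline type stay empty.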

Vector<String> ShaderRD::_build_variant_stage_sources(uint32_t p_variant, CompileData p_data) {
	if (!variants_enabled[p_variant]) {
		return Vector<String>(); // Variant is disabled, return.
	}

	Vector<String> stage_sources;
	stage_sources.resize(RD::SHADER_STAGE_MAX);

	if (pipeline_type == RD::PIPELINE_TYPE_COMPUTE) {
		// Compute stage.
		StringBuilder builder;
		_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_COMPUTE]);
		stage_sources.write[RD::SHADER_STAGE_COMPUTE] = builder.as_string();
	} else if (pipeline_type == RD::PIPELINE_TYPE_RAYTRACING) {
		{
			// Raygen stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_RAYGEN]);
			stage_sources.write[RD::SHADER_STAGE_RAYGEN] = builder.as_string();
		}

		{
			// Any hit stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_ANY_HIT]);
			stage_sources.write[RD::SHADER_STAGE_ANY_HIT] = builder.as_string();
		}

		{
			// Closest hit stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_CLOSEST_HIT]);
			stage_sources.write[RD::SHADER_STAGE_CLOSEST_HIT] = builder.as_string();
		}

		{
			// Miss stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_MISS]);
			stage_sources.write[RD::SHADER_STAGE_MISS] = builder.as_string();
		}

		{
			// Intersection stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_INTERSECTION]);
			stage_sources.write[RD::SHADER_STAGE_INTERSECTION] = builder.as_string();
		}
	} else {
		{
			// Vertex stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_VERTEX]);
			stage_sources.write[RD::SHADER_STAGE_VERTEX] = builder.as_string();
		}

		{
			// Fragment stage.
			StringBuilder builder;
			_build_variant_code(builder, p_variant, p_data.version, stage_templates[STAGE_TYPE_FRAGMENT]);
			stage_sources.write[RD::SHADER_STAGE_FRAGMENT] = builder.as_string();
		}
	}

	return stage_sources;
}
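
// WorkerThreadPool entry point: compiles one variant of a group to SPIR-V,
// then to driver-specific bytecode, storing both the shader RID and the raw
// bytecode (the latter is what gets written to the shader cache).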

void ShaderRD::_compile_variant(uint32_t p_variant, CompileData p_data) {
	uint32_t variant = group_to_variant_map[p_data.group][p_variant];
	if (!variants_enabled[variant]) {
		return; // Variant is disabled, return.
	}

	Vector<String> variant_stage_sources = _build_variant_stage_sources(variant, p_data);
	Vector<RD::ShaderStageSPIRVData> variant_stages = compile_stages(variant_stage_sources, dynamic_buffers);
	ERR_FAIL_COND(variant_stages.is_empty());

	Vector<uint8_t> shader_data = RD::get_singleton()->shader_compile_binary_from_spirv(variant_stages, name + ":" + itos(variant));
	ERR_FAIL_COND(shader_data.is_empty());

	{
		p_data.version->variants.write[variant] = RD::get_singleton()->shader_create_from_bytecode_with_samplers(shader_data, p_data.version->variants[variant], immutable_samplers);
		p_data.version->variant_data.write[variant] = shader_data;
	}
}

Vector<String> ShaderRD::version_build_variant_stage_sources(RID p_version, int p_variant) {
	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL_V(version, Vector<String>());

	if (version->dirty) {
		_initialize_version(version);
	}

	CompileData compile_data;
	compile_data.version = version;
	compile_data.group = variant_to_group[p_variant];
	return _build_variant_stage_sources(p_variant, compile_data);
}

RS::ShaderNativeSourceCode ShaderRD::version_get_native_source_code(RID p_version) {
	Version *version = version_owner.get_or_null(p_version);
	RS::ShaderNativeSourceCode source_code;
	ERR_FAIL_NULL_V(version, source_code);

	MutexLock lock(*version->mutex);

	source_code.versions.resize(variant_defines.size());

	for (int i = 0; i < source_code.versions.size(); i++) {
		if (pipeline_type == RD::PIPELINE_TYPE_RASTERIZATION) {
			// Vertex stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_VERTEX]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "vertex";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}

		if (pipeline_type == RD::PIPELINE_TYPE_RASTERIZATION) {
			// Fragment stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_FRAGMENT]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "fragment";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}

		if (pipeline_type == RD::PIPELINE_TYPE_COMPUTE) {
			// Compute stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_COMPUTE]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "compute";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}

		if (pipeline_type == RD::PIPELINE_TYPE_RAYTRACING) {
			// Raygen stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_RAYGEN]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "raygen";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}
		if (pipeline_type == RD::PIPELINE_TYPE_RAYTRACING) {
			// Any hit stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_ANY_HIT]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "any_hit";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}
		if (pipeline_type == RD::PIPELINE_TYPE_RAYTRACING) {
			// Closest hit stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_CLOSEST_HIT]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "closest_hit";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}
		if (pipeline_type == RD::PIPELINE_TYPE_RAYTRACING) {
			// Miss stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_MISS]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "miss";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}
		if (pipeline_type == RD::PIPELINE_TYPE_RAYTRACING) {
			// Intersection stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_INTERSECTION]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "intersection";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}
	}

	return source_code;
}

String ShaderRD::version_get_cache_file_relative_path(RID p_version, int p_group, const String &p_api_name) {
	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL_V(version, String());

	return _get_cache_file_relative_path(version, p_group, p_api_name);
}

String ShaderRD::_version_get_sha1(Version *p_version) const {
	StringBuilder hash_build;

	hash_build.append("[uniforms]");
	hash_build.append(p_version->uniforms.get_data());
	hash_build.append("[vertex_globals]");
	hash_build.append(p_version->vertex_globals.get_data());
	hash_build.append("[fragment_globals]");
	hash_build.append(p_version->fragment_globals.get_data());
	hash_build.append("[compute_globals]");
	hash_build.append(p_version->compute_globals.get_data());
	hash_build.append("[raygen_globals]");
	hash_build.append(p_version->raygen_globals.get_data());
	hash_build.append("[any_hit_globals]");
	hash_build.append(p_version->any_hit_globals.get_data());
	hash_build.append("[closest_hit_globals]");
	hash_build.append(p_version->closest_hit_globals.get_data());
	hash_build.append("[miss_globals]");
	hash_build.append(p_version->miss_globals.get_data());
	hash_build.append("[intersection_globals]");
	hash_build.append(p_version->intersection_globals.get_data());

	Vector<StringName> code_sections;
	for (const KeyValue<StringName, CharString> &E : p_version->code_sections) {
		code_sections.push_back(E.key);
	}
	code_sections.sort_custom<StringName::AlphCompare>();

	for (int i = 0; i < code_sections.size(); i++) {
		hash_build.append(String("[code:") + String(code_sections[i]) + "]");
		hash_build.append(p_version->code_sections[code_sections[i]].get_data());
	}
	for (int i = 0; i < p_version->custom_defines.size(); i++) {
		hash_build.append("[custom_defines:" + itos(i) + "]");
		hash_build.append(p_version->custom_defines[i].get_data());
	}

	return hash_build.as_string().sha1_text();
}

static const char *shader_file_header = "GDSC";
static const uint32_t cache_file_version = 4;

String ShaderRD::_get_cache_file_relative_path(Version *p_version, int p_group, const String &p_api_name) {
	String sha1 = _version_get_sha1(p_version);
	return name.path_join(group_sha256[p_group]).path_join(sha1) + "." + p_api_name + ".cache";
}

String ShaderRD::_get_cache_file_path(Version *p_version, int p_group, const String &p_api_name, bool p_user_dir) {
	const String &shader_cache_dir = p_user_dir ? shader_cache_user_dir : shader_cache_res_dir;
	String relative_path = _get_cache_file_relative_path(p_version, p_group, p_api_name);
	return shader_cache_dir.path_join(relative_path);
}
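
// Attempts to restore every variant of a group from a cached file, trying the
// user:// cache first and falling back to the res:// cache. Any mismatch
// (magic, file version, variant count, or a missing variant) fails the whole
// load so the group gets recompiled.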

bool ShaderRD::_load_from_cache(Version *p_version, int p_group) {
	String api_safe_name = String(RD::get_singleton()->get_device_api_name()).validate_filename().to_lower();
	Ref<FileAccess> f;
	if (shader_cache_user_dir_valid) {
		f = FileAccess::open(_get_cache_file_path(p_version, p_group, api_safe_name, true), FileAccess::READ);
	}

	if (f.is_null() && shader_cache_res_dir_valid) {
		f = FileAccess::open(_get_cache_file_path(p_version, p_group, api_safe_name, false), FileAccess::READ);
	}

	if (f.is_null()) {
		const String &sha1 = _version_get_sha1(p_version);
		print_verbose(vformat("Shader cache miss for %s", name.path_join(group_sha256[p_group]).path_join(sha1)));
		return false;
	}

	char header[5] = { 0, 0, 0, 0, 0 };
	f->get_buffer((uint8_t *)header, 4);
	ERR_FAIL_COND_V(header != String(shader_file_header), false);

	uint32_t file_version = f->get_32();
	if (file_version != cache_file_version) {
		return false; // Wrong version.
	}

	uint32_t variant_count = f->get_32();

	ERR_FAIL_COND_V(variant_count != (uint32_t)group_to_variant_map[p_group].size(), false); // Should not happen, but check.

	for (uint32_t i = 0; i < variant_count; i++) {
		int variant_id = group_to_variant_map[p_group][i];
		uint32_t variant_size = f->get_32();
		if (!variants_enabled[variant_id]) {
			continue;
		}
		if (variant_size == 0) {
			// A new variant has been requested; failing the entire load will cause it to be generated.
			print_verbose(vformat("Shader cache miss for %s due to missing variant %d", name.path_join(group_sha256[p_group]).path_join(_version_get_sha1(p_version)), variant_id));
			return false;
		}
		Vector<uint8_t> variant_bytes;
		variant_bytes.resize(variant_size);

		uint32_t br = f->get_buffer(variant_bytes.ptrw(), variant_size);

		ERR_FAIL_COND_V(br != variant_size, false);

		p_version->variant_data.write[variant_id] = variant_bytes;
	}

	for (uint32_t i = 0; i < variant_count; i++) {
		int variant_id = group_to_variant_map[p_group][i];
		if (!variants_enabled[variant_id]) {
			p_version->variants.write[variant_id] = RID();
			continue;
		}
		print_verbose(vformat("Loading cache for shader %s, variant %d", name, i));
		{
			RID shader = RD::get_singleton()->shader_create_from_bytecode_with_samplers(p_version->variant_data[variant_id], p_version->variants[variant_id], immutable_samplers);
			if (shader.is_null()) {
				for (uint32_t j = 0; j < i; j++) {
					int variant_free_id = group_to_variant_map[p_group][j];
					RD::get_singleton()->free_rid(p_version->variants[variant_free_id]);
				}
				ERR_FAIL_COND_V(shader.is_null(), false);
			}

			p_version->variants.write[variant_id] = shader;
		}
	}

	p_version->valid = true;
	return true;
}
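
// Writes the group's compiled bytecode to the user:// cache directory; only
// the user directory is ever written, the res:// cache is read-only as far as
// this class is concerned.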

void ShaderRD::_save_to_cache(Version *p_version, int p_group) {
	ERR_FAIL_COND(!shader_cache_user_dir_valid);
	String api_safe_name = String(RD::get_singleton()->get_device_api_name()).validate_filename().to_lower();
	const String &path = _get_cache_file_path(p_version, p_group, api_safe_name, true);
	Ref<FileAccess> f = FileAccess::open(path, FileAccess::WRITE);
	ERR_FAIL_COND(f.is_null());

	PackedByteArray shader_cache_bytes = ShaderRD::save_shader_cache_bytes(group_to_variant_map[p_group], p_version->variant_data);
	f->store_buffer(shader_cache_bytes);
}
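
// Disabled groups still get placeholder RIDs for their variants, so the RIDs
// stay stable and can be filled with real shaders if the group is enabled
// later.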

void ShaderRD::_allocate_placeholders(Version *p_version, int p_group) {
	ERR_FAIL_COND(p_version->variants.is_empty());

	for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
		int variant_id = group_to_variant_map[p_group][i];
		RID shader = RD::get_singleton()->shader_create_placeholder();
		{
			p_version->variants.write[variant_id] = shader;
		}
	}
}

// Try to compile all variants for a given group.
// Will skip variants that are disabled.
void ShaderRD::_compile_version_start(Version *p_version, int p_group) {
	if (!group_enabled[p_group]) {
		return;
	}

	p_version->dirty = false;

#if ENABLE_SHADER_CACHE
	if (shader_cache_user_dir_valid || shader_cache_res_dir_valid) {
		if (_load_from_cache(p_version, p_group)) {
			return;
		}
	}
#endif

	CompileData compile_data;
	compile_data.version = p_version;
	compile_data.group = p_group;

	WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_singleton()->add_template_group_task(this, &ShaderRD::_compile_variant, compile_data, group_to_variant_map[p_group].size(), -1, true, SNAME("ShaderCompilation"));
	p_version->group_compilation_tasks.write[p_group] = group_task;
}
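
// Blocks until the group's compilation task (if any) finishes, then either
// marks the version valid and saves it to the cache, or frees every variant
// it managed to build so the version reports as invalid.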

void ShaderRD::_compile_version_end(Version *p_version, int p_group) {
	if (p_version->group_compilation_tasks.size() <= p_group || p_version->group_compilation_tasks[p_group] == 0) {
		return;
	}
	WorkerThreadPool::GroupID group_task = p_version->group_compilation_tasks[p_group];
	WorkerThreadPool::get_singleton()->wait_for_group_task_completion(group_task);
	p_version->group_compilation_tasks.write[p_group] = 0;

	bool all_valid = true;

	for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
		int variant_id = group_to_variant_map[p_group][i];
		if (!variants_enabled[variant_id]) {
			continue; // Disabled.
		}
		if (p_version->variants[variant_id].is_null()) {
			all_valid = false;
			break;
		}
	}

	if (!all_valid) {
		// Clear versions if they exist.
		for (int i = 0; i < variant_defines.size(); i++) {
			if (!variants_enabled[i] || !group_enabled[variant_defines[i].group]) {
				continue; // Disabled.
			}
			if (!p_version->variants[i].is_null()) {
				RD::get_singleton()->free_rid(p_version->variants[i]);
			}
		}

		p_version->variants.clear();
		p_version->variant_data.clear();
		return;
	}
#if ENABLE_SHADER_CACHE
	else if (shader_cache_user_dir_valid) {
		_save_to_cache(p_version, p_group);
	}
#endif

	p_version->valid = true;
}

void ShaderRD::_compile_ensure_finished(Version *p_version) {
	// Wait for compilation of existing groups if necessary.
	for (int i = 0; i < group_enabled.size(); i++) {
		_compile_version_end(p_version, i);
	}
}

void ShaderRD::_version_set(Version *p_version, const HashMap<String, String> &p_code, const Vector<String> &p_custom_defines) {
	p_version->code_sections.clear();
	for (const KeyValue<String, String> &E : p_code) {
		p_version->code_sections[StringName(E.key.to_upper())] = E.value.utf8();
	}

	p_version->custom_defines.clear();
	for (const String &custom_define : p_custom_defines) {
		p_version->custom_defines.push_back(custom_define.utf8());
	}

	p_version->dirty = true;
	if (p_version->initialize_needed) {
		_initialize_version(p_version);
		for (int i = 0; i < group_enabled.size(); i++) {
			if (!group_enabled[i]) {
				_allocate_placeholders(p_version, i);
				continue;
			}
			_compile_version_start(p_version, i);
		}
		p_version->initialize_needed = false;
	}
}
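
// Public per-pipeline entry points. Each stores the version's globals and
// uniforms, then hands the named code sections and custom defines to
// _version_set, which triggers (re)compilation on first initialization.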

void ShaderRD::version_set_code(RID p_version, const HashMap<String, String> &p_code, const String &p_uniforms, const String &p_vertex_globals, const String &p_fragment_globals, const Vector<String> &p_custom_defines) {
	ERR_FAIL_COND(pipeline_type != RD::PIPELINE_TYPE_RASTERIZATION);

	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL(version);

	MutexLock lock(*version->mutex);

	_compile_ensure_finished(version);

	version->vertex_globals = p_vertex_globals.utf8();
	version->fragment_globals = p_fragment_globals.utf8();
	version->uniforms = p_uniforms.utf8();

	_version_set(version, p_code, p_custom_defines);
}

void ShaderRD::version_set_compute_code(RID p_version, const HashMap<String, String> &p_code, const String &p_uniforms, const String &p_compute_globals, const Vector<String> &p_custom_defines) {
	ERR_FAIL_COND(pipeline_type != RD::PIPELINE_TYPE_COMPUTE);

	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL(version);

	MutexLock lock(*version->mutex);

	_compile_ensure_finished(version);

	version->compute_globals = p_compute_globals.utf8();
	version->uniforms = p_uniforms.utf8();

	_version_set(version, p_code, p_custom_defines);
}

void ShaderRD::version_set_raytracing_code(RID p_version, const HashMap<String, String> &p_code, const String &p_uniforms, const String &p_raygen_globals, const String &p_any_hit_globals, const String &p_closest_hit_globals, const String &p_miss_globals, const String &p_intersection_globals, const Vector<String> &p_custom_defines) {
	ERR_FAIL_COND(pipeline_type != RD::PIPELINE_TYPE_RAYTRACING);

	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL(version);

	version->raygen_globals = p_raygen_globals.utf8();
	version->any_hit_globals = p_any_hit_globals.utf8();
	version->closest_hit_globals = p_closest_hit_globals.utf8();
	version->miss_globals = p_miss_globals.utf8();
	version->intersection_globals = p_intersection_globals.utf8();
	version->uniforms = p_uniforms.utf8();

	_version_set(version, p_code, p_custom_defines);
}

bool ShaderRD::version_is_valid(RID p_version) {
	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL_V(version, false);

	MutexLock lock(*version->mutex);

	if (version->dirty) {
		_initialize_version(version);
		for (int i = 0; i < group_enabled.size(); i++) {
			if (!group_enabled[i]) {
				_allocate_placeholders(version, i);
				continue;
			}
			_compile_version_start(version, i);
		}
	}

	_compile_ensure_finished(version);

	return version->valid;
}

bool ShaderRD::version_free(RID p_version) {
	if (version_owner.owns(p_version)) {
		{
			MutexLock lock(versions_mutex);
			version_mutexes.erase(p_version);
		}

		Version *version = version_owner.get_or_null(p_version);
		if (version->embedded) {
			MutexLock lock(shader_versions_embedded_set_mutex);
			shader_versions_embedded_set.erase({ this, p_version });
		}

		version->mutex->lock();
		_clear_version(version);
		version_owner.free(p_version);
		version->mutex->unlock();
		memdelete(version->mutex);
	} else {
		return false;
	}

	return true;
}
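
// Variant and group toggles. Variants can only be toggled while no versions
// exist; groups can be enabled at any time, which kicks off compilation of
// the newly enabled group for every live version.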

void ShaderRD::set_variant_enabled(int p_variant, bool p_enabled) {
	ERR_FAIL_COND(version_owner.get_rid_count() > 0); //versions exist
	ERR_FAIL_INDEX(p_variant, variants_enabled.size());
	variants_enabled.write[p_variant] = p_enabled;
}

bool ShaderRD::is_variant_enabled(int p_variant) const {
	ERR_FAIL_INDEX_V(p_variant, variants_enabled.size(), false);
	return variants_enabled[p_variant];
}

int64_t ShaderRD::get_variant_count() const {
	return variants_enabled.size();
}

int ShaderRD::get_variant_to_group(int p_variant) const {
	return variant_to_group[p_variant];
}

void ShaderRD::enable_group(int p_group) {
	ERR_FAIL_INDEX(p_group, group_enabled.size());

	if (group_enabled[p_group]) {
		// Group already enabled, do nothing.
		return;
	}

	group_enabled.write[p_group] = true;

	// Compile all versions again to include the new group.
	for (const RID &version_rid : version_owner.get_owned_list()) {
		Version *version = version_owner.get_or_null(version_rid);
		version->mutex->lock();
		_compile_version_start(version, p_group);
		version->mutex->unlock();
	}
}

bool ShaderRD::is_group_enabled(int p_group) const {
	return group_enabled[p_group];
}

int64_t ShaderRD::get_group_count() const {
	return group_enabled.size();
}

const LocalVector<int> &ShaderRD::get_group_to_variants(int p_group) const {
	return group_to_variant_map[p_group];
}

const String &ShaderRD::get_name() const {
	return name;
}

const Vector<uint64_t> &ShaderRD::get_dynamic_buffers() const {
	return dynamic_buffers;
}

bool ShaderRD::shader_cache_cleanup_on_start = false;

ShaderRD::ShaderRD() {
	// Do not feel forced to use this; in most cases it makes little to no difference.
	bool use_32_threads = false;
	if (RD::get_singleton()->get_device_vendor_name() == "NVIDIA") {
		use_32_threads = true;
	}
	String base_compute_define_text;
	if (use_32_threads) {
		base_compute_define_text = "\n#define NATIVE_LOCAL_GROUP_SIZE 32\n#define NATIVE_LOCAL_SIZE_2D_X 8\n#define NATIVE_LOCAL_SIZE_2D_Y 4\n";
	} else {
		base_compute_define_text = "\n#define NATIVE_LOCAL_GROUP_SIZE 64\n#define NATIVE_LOCAL_SIZE_2D_X 8\n#define NATIVE_LOCAL_SIZE_2D_Y 8\n";
	}

	base_compute_defines = base_compute_define_text.ascii();
}
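
// Typical lifecycle of a ShaderRD subclass, as a hedged sketch (names and
// arguments below are illustrative, not part of this file; version_get_shader()
// is declared in shader_rd.h):
//
//   shader.setup(vertex_src, fragment_src, nullptr, "example_shader");
//   shader.initialize(variant_defines, general_defines);
//   RID version = shader.version_create(false);
//   shader.version_set_code(version, code_sections, uniforms,
//           vertex_globals, fragment_globals, custom_defines);
//   if (shader.version_is_valid(version)) {
//       RID compiled = shader.version_get_shader(version, 0 /* variant */);
//   }
//   shader.version_free(version);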

void ShaderRD::initialize(const Vector<String> &p_variant_defines, const String &p_general_defines, const Vector<RD::PipelineImmutableSampler> &p_immutable_samplers, const Vector<uint64_t> &p_dynamic_buffers) {
	ERR_FAIL_COND(variant_defines.size());
	ERR_FAIL_COND(p_variant_defines.is_empty());

	general_defines = p_general_defines.utf8();
	immutable_samplers = p_immutable_samplers;
	dynamic_buffers = p_dynamic_buffers;

	// When initialized this way, there is just one group and it's always enabled.
	group_to_variant_map.insert(0, LocalVector<int>{});
	group_enabled.push_back(true);

	for (int i = 0; i < p_variant_defines.size(); i++) {
		variant_defines.push_back(VariantDefine(0, p_variant_defines[i], true));
		variants_enabled.push_back(true);
		variant_to_group.push_back(0);
		group_to_variant_map[0].push_back(i);
	}

	if (!shader_cache_user_dir.is_empty() || !shader_cache_res_dir.is_empty()) {
		group_sha256.resize(1);
		_initialize_cache();
	}
}

void ShaderRD::_initialize_cache() {
	shader_cache_user_dir_valid = !shader_cache_user_dir.is_empty();
	shader_cache_res_dir_valid = !shader_cache_res_dir.is_empty();
	if (!shader_cache_user_dir_valid) {
		return;
	}

	for (const KeyValue<int, LocalVector<int>> &E : group_to_variant_map) {
		StringBuilder hash_build;

		hash_build.append("[base_hash]");
		hash_build.append(base_sha256);
		hash_build.append("[general_defines]");
		hash_build.append(general_defines.get_data());
		hash_build.append("[group_id]");
		hash_build.append(itos(E.key));
		for (uint32_t i = 0; i < E.value.size(); i++) {
			hash_build.append("[variant_defines:" + itos(E.value[i]) + "]");
			hash_build.append(variant_defines[E.value[i]].text.get_data());
		}

		for (const uint64_t dyn_buffer : dynamic_buffers) {
			hash_build.append("[dynamic_buffer]");
			hash_build.append(uitos(dyn_buffer));
		}

		group_sha256[E.key] = hash_build.as_string().sha256_text();

		if (!shader_cache_user_dir.is_empty()) {
			// Validate that it's possible to write to all the directories required in the user directory.
			Ref<DirAccess> d = DirAccess::open(shader_cache_user_dir);
			if (d.is_null()) {
				shader_cache_user_dir_valid = false;
				ERR_FAIL_MSG(vformat("Unable to open shader cache directory at %s.", shader_cache_user_dir));
			}

			if (d->change_dir(name) != OK) {
				Error err = d->make_dir(name);
				if (err != OK) {
					shader_cache_user_dir_valid = false;
					ERR_FAIL_MSG(vformat("Unable to create shader cache directory %s at %s.", name, shader_cache_user_dir));
				}

				d->change_dir(name);
			}

			if (d->change_dir(group_sha256[E.key]) != OK) {
				Error err = d->make_dir(group_sha256[E.key]);
				if (err != OK) {
					shader_cache_user_dir_valid = false;
					ERR_FAIL_MSG(vformat("Unable to create shader cache directory %s/%s at %s.", name, group_sha256[E.key], shader_cache_user_dir));
				}
			}
		}

		print_verbose("Shader '" + name + "' (group " + itos(E.key) + ") SHA256: " + group_sha256[E.key]);
	}
}

// Same as above, but allows specifying shader compilation groups.
void ShaderRD::initialize(const Vector<VariantDefine> &p_variant_defines, const String &p_general_defines, const Vector<RD::PipelineImmutableSampler> &p_immutable_samplers, const Vector<uint64_t> &p_dynamic_buffers) {
	ERR_FAIL_COND(variant_defines.size());
	ERR_FAIL_COND(p_variant_defines.is_empty());

	general_defines = p_general_defines.utf8();
	immutable_samplers = p_immutable_samplers;
	dynamic_buffers = p_dynamic_buffers;

	int max_group_id = 0;

	for (int i = 0; i < p_variant_defines.size(); i++) {
		// Fill variant array.
		variant_defines.push_back(p_variant_defines[i]);
		variants_enabled.push_back(true);
		variant_to_group.push_back(p_variant_defines[i].group);

		// Map variant array index to group id, so we can iterate over groups later.
		if (!group_to_variant_map.has(p_variant_defines[i].group)) {
			group_to_variant_map.insert(p_variant_defines[i].group, LocalVector<int>{});
		}
		group_to_variant_map[p_variant_defines[i].group].push_back(i);

		// Track max size.
		if (p_variant_defines[i].group > max_group_id) {
			max_group_id = p_variant_defines[i].group;
		}
	}

	// Set all groups to false, then enable those that should be default.
	group_enabled.resize_initialized(max_group_id + 1);
	bool *enabled_ptr = group_enabled.ptrw();
	for (int i = 0; i < p_variant_defines.size(); i++) {
		if (p_variant_defines[i].default_enabled) {
			enabled_ptr[p_variant_defines[i].group] = true;
		}
	}

	if (!shader_cache_user_dir.is_empty()) {
		group_sha256.resize(max_group_id + 1);
		_initialize_cache();
	}
}

void ShaderRD::shaders_embedded_set_lock() {
	shader_versions_embedded_set_mutex.lock();
}

const ShaderRD::ShaderVersionPairSet &ShaderRD::shaders_embedded_set_get() {
	return shader_versions_embedded_set;
}

void ShaderRD::shaders_embedded_set_unlock() {
	shader_versions_embedded_set_mutex.unlock();
}

void ShaderRD::set_shader_cache_user_dir(const String &p_dir) {
	shader_cache_user_dir = p_dir;
}

const String &ShaderRD::get_shader_cache_user_dir() {
	return shader_cache_user_dir;
}

void ShaderRD::set_shader_cache_res_dir(const String &p_dir) {
	shader_cache_res_dir = p_dir;
}

const String &ShaderRD::get_shader_cache_res_dir() {
	return shader_cache_res_dir;
}

void ShaderRD::set_shader_cache_save_compressed(bool p_enable) {
	shader_cache_save_compressed = p_enable;
}

void ShaderRD::set_shader_cache_save_compressed_zstd(bool p_enable) {
	shader_cache_save_compressed_zstd = p_enable;
}

void ShaderRD::set_shader_cache_save_debug(bool p_enable) {
	shader_cache_save_debug = p_enable;
}
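
// Compiles each non-empty stage source to SPIR-V. The index into
// p_stage_sources doubles as the RD::ShaderStage, so stages that don't apply
// are simply left empty by the caller.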

Vector<RD::ShaderStageSPIRVData> ShaderRD::compile_stages(const Vector<String> &p_stage_sources, const Vector<uint64_t> &p_dynamic_buffers) {
	RD::ShaderStageSPIRVData stage;
	Vector<RD::ShaderStageSPIRVData> stages;
	String error;
	RD::ShaderStage compilation_failed_stage = RD::SHADER_STAGE_MAX;
	bool compilation_failed = false;
	for (int64_t i = 0; i < p_stage_sources.size() && !compilation_failed; i++) {
		if (p_stage_sources[i].is_empty()) {
			continue;
		}

		stage.spirv = RD::get_singleton()->shader_compile_spirv_from_source(RD::ShaderStage(i), p_stage_sources[i], RD::SHADER_LANGUAGE_GLSL, &error);
		stage.dynamic_buffers = p_dynamic_buffers;
		stage.shader_stage = RD::ShaderStage(i);
		if (!stage.spirv.is_empty()) {
			stages.push_back(stage);
		} else {
			compilation_failed_stage = RD::ShaderStage(i);
			compilation_failed = true;
		}
	}

	if (compilation_failed) {
		ERR_PRINT("Error compiling " + String(compilation_failed_stage == RD::SHADER_STAGE_COMPUTE ? "Compute" : (compilation_failed_stage == RD::SHADER_STAGE_VERTEX ? "Vertex" : "Fragment")) + " shader.");
		ERR_PRINT(error);

#ifdef DEBUG_ENABLED
		ERR_PRINT("code:\n" + p_stage_sources[compilation_failed_stage].get_with_code_lines());
#endif

		return Vector<RD::ShaderStageSPIRVData>();
	} else {
		return stages;
	}
}
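
// Serializes a group's variants into the cache blob. Layout (matching what
// _load_from_cache expects):
//   4 bytes  "GDSC" magic
//   u32      cache_file_version
//   u32      variant count
//   per variant: u32 bytecode size, then that many bytes of bytecode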

PackedByteArray ShaderRD::save_shader_cache_bytes(const LocalVector<int> &p_variants, const Vector<Vector<uint8_t>> &p_variant_data) {
	uint32_t variant_count = p_variants.size();
	PackedByteArray bytes;
	int64_t total_size = 0;
	total_size += 4 + sizeof(uint32_t) * 2;
	for (uint32_t i = 0; i < variant_count; i++) {
		total_size += sizeof(uint32_t) + p_variant_data[p_variants[i]].size();
	}

	bytes.resize(total_size);

	uint8_t *bytes_ptr = bytes.ptrw();
	memcpy(bytes_ptr, shader_file_header, 4);
	bytes_ptr += 4;

	*(uint32_t *)(bytes_ptr) = cache_file_version;
	bytes_ptr += sizeof(uint32_t);

	*(uint32_t *)(bytes_ptr) = variant_count;
	bytes_ptr += sizeof(uint32_t);

	for (uint32_t i = 0; i < variant_count; i++) {
		int variant_id = p_variants[i];
		*(uint32_t *)(bytes_ptr) = uint32_t(p_variant_data[variant_id].size());
		bytes_ptr += sizeof(uint32_t);

		memcpy(bytes_ptr, p_variant_data[variant_id].ptr(), p_variant_data[variant_id].size());
		bytes_ptr += p_variant_data[variant_id].size();
	}

	DEV_ASSERT((bytes.ptrw() + bytes.size()) == bytes_ptr);
	return bytes;
}

String ShaderRD::shader_cache_user_dir;
String ShaderRD::shader_cache_res_dir;
bool ShaderRD::shader_cache_save_compressed = true;
bool ShaderRD::shader_cache_save_compressed_zstd = true;
bool ShaderRD::shader_cache_save_debug = true;

ShaderRD::~ShaderRD() {
	LocalVector<RID> remaining = version_owner.get_owned_list();
	if (remaining.size()) {
		ERR_PRINT(itos(remaining.size()) + " shaders of type " + name + " were never freed");
		for (const RID &version_rid : remaining) {
			version_free(version_rid);
		}
	}
}