CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
hrydgard

CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!

GitHub Repository: hrydgard/ppsspp
Path: blob/master/GPU/Vulkan/ShaderManagerVulkan.cpp
Views: 1401
1
// Copyright (c) 2015- PPSSPP Project.
2
3
// This program is free software: you can redistribute it and/or modify
4
// it under the terms of the GNU General Public License as published by
5
// the Free Software Foundation, version 2.0 or later versions.
6
7
// This program is distributed in the hope that it will be useful,
8
// but WITHOUT ANY WARRANTY; without even the implied warranty of
9
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
// GNU General Public License 2.0 for more details.
11
12
// A copy of the GPL 2.0 should have been included with the program.
13
// If not, see http://www.gnu.org/licenses/
14
15
// Official git repository and contact information can be found at
16
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17
18
#ifdef _WIN32
19
//#define SHADERLOG
20
#endif
21
22
#include "Common/LogReporting.h"
23
#include "Common/Math/lin/matrix4x4.h"
24
#include "Common/Math/math_util.h"
25
#include "Common/Data/Convert/SmallDataConvert.h"
26
#include "Common/Profiler/Profiler.h"
27
#include "Common/GPU/thin3d.h"
28
#include "Common/Data/Encoding/Utf8.h"
29
#include "Common/TimeUtil.h"
30
#include "Common/MemoryUtil.h"
31
32
#include "Common/StringUtils.h"
33
#include "Common/GPU/Vulkan/VulkanContext.h"
34
#include "Common/GPU/Vulkan/VulkanMemory.h"
35
#include "Common/Log.h"
36
#include "Common/CommonTypes.h"
37
#include "Core/Config.h"
38
#include "GPU/Math3D.h"
39
#include "GPU/GPUState.h"
40
#include "GPU/ge_constants.h"
41
#include "GPU/Common/FragmentShaderGenerator.h"
42
#include "GPU/Common/VertexShaderGenerator.h"
43
#include "GPU/Common/GeometryShaderGenerator.h"
44
#include "GPU/Vulkan/ShaderManagerVulkan.h"
45
#include "GPU/Vulkan/DrawEngineVulkan.h"
46
#include "GPU/Vulkan/FramebufferManagerVulkan.h"
47
48
// Most drivers treat vkCreateShaderModule as pretty much a memcpy. What actually
49
// takes time here, and makes this worthy of parallelization, is GLSLtoSPV.
50
// Takes ownership over tag.
51
// This always returns something, checking the return value for null is not meaningful.
52
static Promise<VkShaderModule> *CompileShaderModuleAsync(VulkanContext *vulkan, VkShaderStageFlagBits stage, const char *code, std::string *tag) {
53
auto compile = [=] {
54
PROFILE_THIS_SCOPE("shadercomp");
55
56
std::string errorMessage;
57
std::vector<uint32_t> spirv;
58
59
bool success = GLSLtoSPV(stage, code, GLSLVariant::VULKAN, spirv, &errorMessage);
60
61
if (!errorMessage.empty()) {
62
if (success) {
63
ERROR_LOG(Log::G3D, "Warnings in shader compilation!");
64
} else {
65
ERROR_LOG(Log::G3D, "Error in shader compilation!");
66
}
67
std::string numberedSource = LineNumberString(code);
68
ERROR_LOG(Log::G3D, "Messages: %s", errorMessage.c_str());
69
ERROR_LOG(Log::G3D, "Shader source:\n%s", numberedSource.c_str());
70
#if PPSSPP_PLATFORM(WINDOWS)
71
OutputDebugStringA("Error messages:\n");
72
OutputDebugStringA(errorMessage.c_str());
73
OutputDebugStringA(numberedSource.c_str());
74
#endif
75
Reporting::ReportMessage("Vulkan error in shader compilation: info: %s / code: %s", errorMessage.c_str(), code);
76
}
77
78
VkShaderModule shaderModule = VK_NULL_HANDLE;
79
if (success) {
80
const char *createTag = tag ? tag->c_str() : nullptr;
81
if (!createTag) {
82
switch (stage) {
83
case VK_SHADER_STAGE_VERTEX_BIT: createTag = "game_vertex"; break;
84
case VK_SHADER_STAGE_FRAGMENT_BIT: createTag = "game_fragment"; break;
85
case VK_SHADER_STAGE_GEOMETRY_BIT: createTag = "game_geometry"; break;
86
case VK_SHADER_STAGE_COMPUTE_BIT: createTag = "game_compute"; break;
87
default: break;
88
}
89
}
90
91
success = vulkan->CreateShaderModule(spirv, &shaderModule, createTag);
92
#ifdef SHADERLOG
93
OutputDebugStringA("OK");
94
#endif
95
delete tag;
96
}
97
return shaderModule;
98
};
99
100
#if defined(_DEBUG)
101
// Don't parallelize in debug mode, pathological behavior due to mutex locks in allocator which is HEAVILY used by glslang.
102
bool singleThreaded = true;
103
#else
104
bool singleThreaded = false;
105
#endif
106
107
if (singleThreaded) {
108
return Promise<VkShaderModule>::AlreadyDone(compile());
109
} else {
110
return Promise<VkShaderModule>::Spawn(&g_threadManager, compile, TaskType::DEDICATED_THREAD);
111
}
112
}
113
114
VulkanFragmentShader::VulkanFragmentShader(VulkanContext *vulkan, FShaderID id, FragmentShaderFlags flags, const char *code)
	: vulkan_(vulkan), id_(id), flags_(flags) {
	// Callers must hand us a valid, generated shader ID.
	_assert_(!id.is_invalid());
	source_ = code;
	// Kick off SPIR-V compilation on a worker thread; ownership of the description
	// string passes to CompileShaderModuleAsync.
	std::string *desc = new std::string(FragmentShaderDesc(id));
	module_ = CompileShaderModuleAsync(vulkan, VK_SHADER_STAGE_FRAGMENT_BIT, source_.c_str(), desc);
	VERBOSE_LOG(Log::G3D, "Compiled fragment shader:\n%s\n", (const char *)code);
}
121
122
VulkanFragmentShader::~VulkanFragmentShader() {
	if (!module_)
		return;
	// Queue the compiled module (if compilation produced one) for deferred deletion.
	VkShaderModule mod = module_->BlockUntilReady();
	if (mod != VK_NULL_HANDLE) {
		vulkan_->Delete().QueueDeleteShaderModule(mod);
	}
	// The Promise wrapper itself is freed on the same deferred schedule.
	vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *userdata) {
		delete reinterpret_cast<Promise<VkShaderModule> *>(userdata);
	}, module_);
}
134
135
// Returns a debug representation of this shader for the shader viewer UI.
std::string VulkanFragmentShader::GetShaderString(DebugShaderStringType type) const {
	if (type == SHADER_STRING_SOURCE_CODE)
		return source_;
	if (type == SHADER_STRING_SHORT_DESC)
		return FragmentShaderDesc(id_);
	return "N/A";
}
145
146
VulkanVertexShader::VulkanVertexShader(VulkanContext *vulkan, VShaderID id, VertexShaderFlags flags, const char *code, bool useHWTransform)
	: vulkan_(vulkan), useHWTransform_(useHWTransform), flags_(flags), id_(id) {
	// Callers must hand us a valid, generated shader ID.
	_assert_(!id.is_invalid());
	source_ = code;
	// Kick off SPIR-V compilation on a worker thread; ownership of the description
	// string passes to CompileShaderModuleAsync.
	std::string *desc = new std::string(VertexShaderDesc(id));
	module_ = CompileShaderModuleAsync(vulkan, VK_SHADER_STAGE_VERTEX_BIT, source_.c_str(), desc);
	VERBOSE_LOG(Log::G3D, "Compiled vertex shader:\n%s\n", (const char *)code);
}
153
154
VulkanVertexShader::~VulkanVertexShader() {
	if (!module_)
		return;
	// Queue the compiled module (if compilation produced one) for deferred deletion.
	VkShaderModule mod = module_->BlockUntilReady();
	if (mod != VK_NULL_HANDLE) {
		vulkan_->Delete().QueueDeleteShaderModule(mod);
	}
	// The Promise wrapper itself is freed on the same deferred schedule.
	vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *userdata) {
		delete reinterpret_cast<Promise<VkShaderModule> *>(userdata);
	}, module_);
}
166
167
// Returns a debug representation of this shader for the shader viewer UI.
std::string VulkanVertexShader::GetShaderString(DebugShaderStringType type) const {
	if (type == SHADER_STRING_SOURCE_CODE)
		return source_;
	if (type == SHADER_STRING_SHORT_DESC)
		return VertexShaderDesc(id_);
	return "N/A";
}
177
178
VulkanGeometryShader::VulkanGeometryShader(VulkanContext *vulkan, GShaderID id, const char *code)
	: vulkan_(vulkan), id_(id) {
	// Callers must hand us a valid, generated shader ID.
	_assert_(!id.is_invalid());
	source_ = code;
	// Pass the description string directly instead of round-tripping it through
	// c_str() (which forced a needless extra scan+copy). Matches the vertex/fragment
	// constructors; ownership passes to CompileShaderModuleAsync.
	module_ = CompileShaderModuleAsync(vulkan, VK_SHADER_STAGE_GEOMETRY_BIT, source_.c_str(), new std::string(GeometryShaderDesc(id)));
	VERBOSE_LOG(Log::G3D, "Compiled geometry shader:\n%s\n", (const char *)code);
}
185
186
VulkanGeometryShader::~VulkanGeometryShader() {
	if (!module_)
		return;
	// Queue the compiled module (if compilation produced one) for deferred deletion.
	VkShaderModule mod = module_->BlockUntilReady();
	if (mod != VK_NULL_HANDLE) {
		vulkan_->Delete().QueueDeleteShaderModule(mod);
	}
	// The Promise wrapper itself is freed on the same deferred schedule.
	vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *userdata) {
		delete reinterpret_cast<Promise<VkShaderModule> *>(userdata);
	}, module_);
}
198
199
// Returns a debug representation of this shader for the shader viewer UI.
std::string VulkanGeometryShader::GetShaderString(DebugShaderStringType type) const {
	if (type == SHADER_STRING_SOURCE_CODE)
		return source_;
	if (type == SHADER_STRING_SHORT_DESC)
		return GeometryShaderDesc(id_);
	return "N/A";
}
209
210
static constexpr size_t CODE_BUFFER_SIZE = 32768;
211
212
ShaderManagerVulkan::ShaderManagerVulkan(Draw::DrawContext *draw)
213
: ShaderManagerCommon(draw), compat_(GLSL_VULKAN), fsCache_(16), vsCache_(16), gsCache_(16) {
214
codeBuffer_ = new char[CODE_BUFFER_SIZE];
215
VulkanContext *vulkan = (VulkanContext *)draw->GetNativeObject(Draw::NativeObject::CONTEXT);
216
uboAlignment_ = vulkan->GetPhysicalDeviceProperties().properties.limits.minUniformBufferOffsetAlignment;
217
218
uniforms_ = (Uniforms *)AllocateAlignedMemory(sizeof(Uniforms), 16);
219
220
static_assert(sizeof(uniforms_->ub_base) <= 512, "ub_base grew too big");
221
static_assert(sizeof(uniforms_->ub_lights) <= 512, "ub_lights grew too big");
222
static_assert(sizeof(uniforms_->ub_bones) <= 384, "ub_bones grew too big");
223
}
224
225
ShaderManagerVulkan::~ShaderManagerVulkan() {
	// Tear down cached shader objects first, then release our own buffers.
	Clear();
	delete[] codeBuffer_;
	FreeAlignedMemory(uniforms_);
}
230
231
// Called when the graphics device goes away: drop all compiled shaders and
// forget the draw context (DeviceRestore() will supply a new one).
void ShaderManagerVulkan::DeviceLost() {
	Clear();
	draw_ = nullptr;
}
235
236
// Re-attach to a (new) draw context after a device loss and refresh the
// device-dependent UBO alignment requirement.
void ShaderManagerVulkan::DeviceRestore(Draw::DrawContext *draw) {
	draw_ = draw;
	VulkanContext *context = (VulkanContext *)draw->GetNativeObject(Draw::NativeObject::CONTEXT);
	uboAlignment_ = context->GetPhysicalDeviceProperties().properties.limits.minUniformBufferOffsetAlignment;
}
241
242
void ShaderManagerVulkan::Clear() {
243
fsCache_.Iterate([&](const FShaderID &key, VulkanFragmentShader *shader) {
244
delete shader;
245
});
246
vsCache_.Iterate([&](const VShaderID &key, VulkanVertexShader *shader) {
247
delete shader;
248
});
249
gsCache_.Iterate([&](const GShaderID &key, VulkanGeometryShader *shader) {
250
delete shader;
251
});
252
fsCache_.Clear();
253
vsCache_.Clear();
254
gsCache_.Clear();
255
lastFSID_.set_invalid();
256
lastVSID_.set_invalid();
257
lastGSID_.set_invalid();
258
gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_GEOMETRYSHADER_STATE);
259
}
260
261
// Clears the caches, drops the last-shader bindings, and marks all uniforms and
// shader state dirty so everything is rebuilt from scratch on the next draw.
void ShaderManagerVulkan::ClearShaders() {
	Clear();
	DirtyLastShader();
	gstate_c.Dirty(DIRTY_ALL_UNIFORMS | DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_GEOMETRYSHADER_STATE);
}
266
267
void ShaderManagerVulkan::DirtyLastShader() {
268
// Forget the last shader ID
269
lastFSID_.set_invalid();
270
lastVSID_.set_invalid();
271
lastGSID_.set_invalid();
272
lastVShader_ = nullptr;
273
lastFShader_ = nullptr;
274
lastGShader_ = nullptr;
275
gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_GEOMETRYSHADER_STATE);
276
}
277
278
uint64_t ShaderManagerVulkan::UpdateUniforms(bool useBufferedRendering) {
279
uint64_t dirty = gstate_c.GetDirtyUniforms();
280
if (dirty != 0) {
281
if (dirty & DIRTY_BASE_UNIFORMS)
282
BaseUpdateUniforms(&uniforms_->ub_base, dirty, false, useBufferedRendering);
283
if (dirty & DIRTY_LIGHT_UNIFORMS)
284
LightUpdateUniforms(&uniforms_->ub_lights, dirty);
285
if (dirty & DIRTY_BONE_UNIFORMS)
286
BoneUpdateUniforms(&uniforms_->ub_bones, dirty);
287
}
288
gstate_c.CleanUniforms();
289
return dirty;
290
}
291
292
// Looks up (or generates and starts compiling) the vertex, fragment and geometry
// shaders for the current GPU state. Results come back through vshader/fshader/
// gshader; gshader may legitimately be null (geometry shaders are optional).
// The lastXShader_/lastXSID_ members short-circuit the cache lookup when the
// relevant dirty flag is set but the computed ID didn't change.
void ShaderManagerVulkan::GetShaders(int prim, VertexDecoder *decoder, VulkanVertexShader **vshader, VulkanFragmentShader **fshader, VulkanGeometryShader **gshader, const ComputedPipelineState &pipelineState, bool useHWTransform, bool useHWTessellation, bool weightsAsFloat, bool useSkinInDecode) {
	VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);

	// --- Vertex shader ---
	VShaderID VSID;
	VulkanVertexShader *vs = nullptr;
	if (gstate_c.IsDirty(DIRTY_VERTEXSHADER_STATE)) {
		gstate_c.Clean(DIRTY_VERTEXSHADER_STATE);
		ComputeVertexShaderID(&VSID, decoder, useHWTransform, useHWTessellation, weightsAsFloat, useSkinInDecode);
		if (VSID == lastVSID_) {
			// Same ID as last draw - reuse without a cache lookup.
			_dbg_assert_(lastVShader_ != nullptr);
			vs = lastVShader_;
		} else if (!vsCache_.Get(VSID, &vs)) {
			// Vertex shader not in cache. Let's compile it.
			std::string genErrorString;
			uint64_t uniformMask = 0;  // Not used
			uint32_t attributeMask = 0;  // Not used
			VertexShaderFlags flags{};
			bool success = GenerateVertexShader(VSID, codeBuffer_, compat_, draw_->GetBugs(), &attributeMask, &uniformMask, &flags, &genErrorString);
			_assert_msg_(success, "VS gen error: %s", genErrorString.c_str());
			_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "VS length error: %d", (int)strlen(codeBuffer_));

			// Don't need to re-lookup anymore, now that we lock wider.
			vs = new VulkanVertexShader(vulkan, VSID, flags, codeBuffer_, useHWTransform);
			vsCache_.Insert(VSID, vs);
		}
		lastVShader_ = vs;
		lastVSID_ = VSID;
	} else {
		VSID = lastVSID_;
		vs = lastVShader_;
	}
	*vshader = vs;

	// --- Fragment shader ---
	FShaderID FSID;
	VulkanFragmentShader *fs = nullptr;
	if (gstate_c.IsDirty(DIRTY_FRAGMENTSHADER_STATE)) {
		gstate_c.Clean(DIRTY_FRAGMENTSHADER_STATE);
		ComputeFragmentShaderID(&FSID, pipelineState, draw_->GetBugs());
		if (FSID == lastFSID_) {
			_dbg_assert_(lastFShader_ != nullptr);
			fs = lastFShader_;
		} else if (!fsCache_.Get(FSID, &fs)) {
			// Fragment shader not in cache. Let's compile it.
			std::string genErrorString;
			uint64_t uniformMask = 0;  // Not used
			FragmentShaderFlags flags{};
			bool success = GenerateFragmentShader(FSID, codeBuffer_, compat_, draw_->GetBugs(), &uniformMask, &flags, &genErrorString);
			_assert_msg_(success, "FS gen error: %s", genErrorString.c_str());
			_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "FS length error: %d", (int)strlen(codeBuffer_));

			fs = new VulkanFragmentShader(vulkan, FSID, flags, codeBuffer_);
			fsCache_.Insert(FSID, fs);
		}
		lastFShader_ = fs;
		lastFSID_ = FSID;
	} else {
		FSID = lastFSID_;
		fs = lastFShader_;
	}
	*fshader = fs;

	// --- Geometry shader (only when the ID says it's enabled) ---
	GShaderID GSID;
	VulkanGeometryShader *gs = nullptr;
	if (gstate_c.IsDirty(DIRTY_GEOMETRYSHADER_STATE)) {
		gstate_c.Clean(DIRTY_GEOMETRYSHADER_STATE);
		ComputeGeometryShaderID(&GSID, draw_->GetBugs(), prim);
		if (GSID == lastGSID_) {
			// it's ok for this to be null.
			gs = lastGShader_;
		} else if (GSID.Bit(GS_BIT_ENABLED)) {
			if (!gsCache_.Get(GSID, &gs)) {
				// Geometry shader not in cache. Let's compile it.
				std::string genErrorString;
				bool success = GenerateGeometryShader(GSID, codeBuffer_, compat_, draw_->GetBugs(), &genErrorString);
				_assert_msg_(success, "GS gen error: %s", genErrorString.c_str());
				_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "GS length error: %d", (int)strlen(codeBuffer_));

				gs = new VulkanGeometryShader(vulkan, GSID, codeBuffer_);
				gsCache_.Insert(GSID, gs);
			}
		} else {
			gs = nullptr;
		}
		lastGShader_ = gs;
		lastGSID_ = GSID;
	} else {
		GSID = lastGSID_;
		gs = lastGShader_;
	}
	*gshader = gs;

	// Sanity checks: the three shader IDs must agree on interpolation settings
	// they share (flat shading, lighting mode).
	_dbg_assert_(FSID.Bit(FS_BIT_FLATSHADE) == VSID.Bit(VS_BIT_FLATSHADE));
	_dbg_assert_(FSID.Bit(FS_BIT_LMODE) == VSID.Bit(VS_BIT_LMODE));
	if (GSID.Bit(GS_BIT_ENABLED)) {
		_dbg_assert_(GSID.Bit(GS_BIT_LMODE) == VSID.Bit(VS_BIT_LMODE));
	}

	_dbg_assert_msg_((*vshader)->UseHWTransform() == useHWTransform, "Bad vshader was computed");
}
391
392
// Returns the stringified IDs of every cached shader of the given type,
// for the debug shader viewer.
std::vector<std::string> ShaderManagerVulkan::DebugGetShaderIDs(DebugShaderType type) {
	std::vector<std::string> result;
	// Shared helper: stringify an ID and collect it.
	auto collect = [&result](const auto &id) {
		std::string s;
		id.ToString(&s);
		result.push_back(std::move(s));
	};
	switch (type) {
	case SHADER_TYPE_VERTEX:
		vsCache_.Iterate([&](const VShaderID &id, VulkanVertexShader *shader) { collect(id); });
		break;
	case SHADER_TYPE_FRAGMENT:
		fsCache_.Iterate([&](const FShaderID &id, VulkanFragmentShader *shader) { collect(id); });
		break;
	case SHADER_TYPE_GEOMETRY:
		gsCache_.Iterate([&](const GShaderID &id, VulkanGeometryShader *shader) { collect(id); });
		break;
	default:
		break;
	}
	return result;
}
421
422
std::string ShaderManagerVulkan::DebugGetShaderString(std::string id, DebugShaderType type, DebugShaderStringType stringType) {
423
ShaderID shaderId;
424
shaderId.FromString(id);
425
switch (type) {
426
case SHADER_TYPE_VERTEX:
427
{
428
VulkanVertexShader *vs;
429
if (vsCache_.Get(VShaderID(shaderId), &vs)) {
430
return vs ? vs->GetShaderString(stringType) : "null (bad)";
431
} else {
432
return "";
433
}
434
}
435
case SHADER_TYPE_FRAGMENT:
436
{
437
VulkanFragmentShader *fs;
438
if (fsCache_.Get(FShaderID(shaderId), &fs)) {
439
return fs ? fs->GetShaderString(stringType) : "null (bad)";
440
} else {
441
return "";
442
}
443
}
444
case SHADER_TYPE_GEOMETRY:
445
{
446
VulkanGeometryShader *gs;
447
if (gsCache_.Get(GShaderID(shaderId), &gs)) {
448
return gs ? gs->GetShaderString(stringType) : "null (bad)";
449
} else {
450
return "";
451
}
452
}
453
default:
454
return "N/A";
455
}
456
}
457
458
// Reverse lookup: find the cached vertex shader that owns the given module.
// Linear scan; blocks on each entry's pending compile. Returns null if not found.
VulkanVertexShader *ShaderManagerVulkan::GetVertexShaderFromModule(VkShaderModule module) {
	VulkanVertexShader *result = nullptr;
	vsCache_.Iterate([&](const VShaderID &id, VulkanVertexShader *candidate) {
		if (candidate->GetModule()->BlockUntilReady() == module) {
			result = candidate;
		}
	});
	return result;
}
468
469
// Reverse lookup: find the cached fragment shader that owns the given module.
// Linear scan; blocks on each entry's pending compile. Returns null if not found.
VulkanFragmentShader *ShaderManagerVulkan::GetFragmentShaderFromModule(VkShaderModule module) {
	VulkanFragmentShader *result = nullptr;
	fsCache_.Iterate([&](const FShaderID &id, VulkanFragmentShader *candidate) {
		if (candidate->GetModule()->BlockUntilReady() == module) {
			result = candidate;
		}
	});
	return result;
}
479
480
// Reverse lookup: find the cached geometry shader that owns the given module.
// Linear scan; blocks on each entry's pending compile. Returns null if not found.
VulkanGeometryShader *ShaderManagerVulkan::GetGeometryShaderFromModule(VkShaderModule module) {
	VulkanGeometryShader *result = nullptr;
	gsCache_.Iterate([&](const GShaderID &id, VulkanGeometryShader *candidate) {
		if (candidate->GetModule()->BlockUntilReady() == module) {
			result = candidate;
		}
	});
	return result;
}
490
491
// Shader cache.
//
// We simply store the IDs of the shaders used during gameplay. On next startup of
// the same game, we simply compile all the shaders from the start, so we don't have to
// compile them on the fly later. We also store the Vulkan pipeline cache, so if it contains
// pipelines compiled from SPIR-V matching these shaders, pipeline creation will be practically
// instantaneous.

// Bit flags stored in VulkanCacheHeader::detectFlags, recording runtime detections
// that should be restored early on the next run (see LoadCacheFlags()).
enum class VulkanCacheDetectFlags {
	EQUAL_DEPTH = 1,
};

// Magic and version for the on-disk cache file. Bump CACHE_VERSION whenever the
// shader ID format or generator output changes incompatibly.
#define CACHE_HEADER_MAGIC 0xff51f420
#define CACHE_VERSION 51

// Fixed-size header at the start of the cache file, followed by the raw vertex,
// fragment and geometry shader IDs, in that order.
struct VulkanCacheHeader {
	uint32_t magic;               // CACHE_HEADER_MAGIC
	uint32_t version;             // CACHE_VERSION
	uint32_t useFlags;            // gstate_c use flags at save time; mismatch tolerated (see LoadCache()).
	uint32_t detectFlags;         // VulkanCacheDetectFlags bits.
	int numVertexShaders;
	int numFragmentShaders;
	int numGeometryShaders;
};
515
516
bool ShaderManagerVulkan::LoadCacheFlags(FILE *f, DrawEngineVulkan *drawEngine) {
517
VulkanCacheHeader header{};
518
long pos = ftell(f);
519
bool success = fread(&header, sizeof(header), 1, f) == 1;
520
// We'll read it again later, this is just to check the flags.
521
success = success && fseek(f, pos, SEEK_SET) == 0;
522
if (!success || header.magic != CACHE_HEADER_MAGIC) {
523
WARN_LOG(Log::G3D, "Shader cache magic mismatch");
524
return false;
525
}
526
if (header.version != CACHE_VERSION) {
527
WARN_LOG(Log::G3D, "Shader cache version mismatch, %d, expected %d", header.version, CACHE_VERSION);
528
return false;
529
}
530
531
if ((header.detectFlags & (uint32_t)VulkanCacheDetectFlags::EQUAL_DEPTH) != 0) {
532
drawEngine->SetEverUsedExactEqualDepth(true);
533
}
534
535
return true;
536
}
537
538
bool ShaderManagerVulkan::LoadCache(FILE *f) {
539
VulkanCacheHeader header{};
540
bool success = fread(&header, sizeof(header), 1, f) == 1;
541
// We don't need to validate magic/version again, done in LoadCacheFlags().
542
543
if (header.useFlags != gstate_c.GetUseFlags()) {
544
// This can simply be a result of sawExactEqualDepth_ having been flipped to true in the previous run.
545
// Let's just keep going.
546
WARN_LOG(Log::G3D, "Shader cache useFlags mismatch, %08x, expected %08x", header.useFlags, gstate_c.GetUseFlags());
547
} else {
548
// We're compiling shaders now, so they haven't changed anymore.
549
gstate_c.useFlagsChanged = false;
550
}
551
552
int failCount = 0;
553
554
VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);
555
for (int i = 0; i < header.numVertexShaders; i++) {
556
VShaderID id;
557
if (fread(&id, sizeof(id), 1, f) != 1) {
558
ERROR_LOG(Log::G3D, "Vulkan shader cache truncated (in VertexShaders)");
559
return false;
560
}
561
bool useHWTransform = id.Bit(VS_BIT_USE_HW_TRANSFORM);
562
std::string genErrorString;
563
uint32_t attributeMask = 0;
564
uint64_t uniformMask = 0;
565
VertexShaderFlags flags;
566
if (!GenerateVertexShader(id, codeBuffer_, compat_, draw_->GetBugs(), &attributeMask, &uniformMask, &flags, &genErrorString)) {
567
ERROR_LOG(Log::G3D, "Failed to generate vertex shader during cache load");
568
// We just ignore this one and carry on.
569
failCount++;
570
continue;
571
}
572
_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "VS length error: %d", (int)strlen(codeBuffer_));
573
// Don't add the new shader if already compiled - though this should no longer happen.
574
if (!vsCache_.ContainsKey(id)) {
575
VulkanVertexShader *vs = new VulkanVertexShader(vulkan, id, flags, codeBuffer_, useHWTransform);
576
vsCache_.Insert(id, vs);
577
}
578
}
579
uint32_t vendorID = vulkan->GetPhysicalDeviceProperties().properties.vendorID;
580
581
for (int i = 0; i < header.numFragmentShaders; i++) {
582
FShaderID id;
583
if (fread(&id, sizeof(id), 1, f) != 1) {
584
ERROR_LOG(Log::G3D, "Vulkan shader cache truncated (in FragmentShaders)");
585
return false;
586
}
587
std::string genErrorString;
588
uint64_t uniformMask = 0;
589
FragmentShaderFlags flags;
590
if (!GenerateFragmentShader(id, codeBuffer_, compat_, draw_->GetBugs(), &uniformMask, &flags, &genErrorString)) {
591
ERROR_LOG(Log::G3D, "Failed to generate fragment shader during cache load");
592
// We just ignore this one and carry on.
593
failCount++;
594
continue;
595
}
596
_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "FS length error: %d", (int)strlen(codeBuffer_));
597
if (!fsCache_.ContainsKey(id)) {
598
VulkanFragmentShader *fs = new VulkanFragmentShader(vulkan, id, flags, codeBuffer_);
599
fsCache_.Insert(id, fs);
600
}
601
}
602
603
// If it's not enabled, don't create shaders cached from earlier runs - creation will likely fail.
604
if (gstate_c.Use(GPU_USE_GS_CULLING)) {
605
for (int i = 0; i < header.numGeometryShaders; i++) {
606
GShaderID id;
607
if (fread(&id, sizeof(id), 1, f) != 1) {
608
ERROR_LOG(Log::G3D, "Vulkan shader cache truncated (in GeometryShaders)");
609
return false;
610
}
611
std::string genErrorString;
612
if (!GenerateGeometryShader(id, codeBuffer_, compat_, draw_->GetBugs(), &genErrorString)) {
613
ERROR_LOG(Log::G3D, "Failed to generate geometry shader during cache load");
614
// We just ignore this one and carry on.
615
failCount++;
616
continue;
617
}
618
_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "GS length error: %d", (int)strlen(codeBuffer_));
619
if (!gsCache_.ContainsKey(id)) {
620
VulkanGeometryShader *gs = new VulkanGeometryShader(vulkan, id, codeBuffer_);
621
gsCache_.Insert(id, gs);
622
}
623
}
624
}
625
626
NOTICE_LOG(Log::G3D, "ShaderCache: Loaded %d vertex, %d fragment shaders and %d geometry shaders (failed %d)", header.numVertexShaders, header.numFragmentShaders, header.numGeometryShaders, failCount);
627
return true;
628
}
629
630
void ShaderManagerVulkan::SaveCache(FILE *f, DrawEngineVulkan *drawEngine) {
631
VulkanCacheHeader header{};
632
header.magic = CACHE_HEADER_MAGIC;
633
header.version = CACHE_VERSION;
634
header.useFlags = gstate_c.GetUseFlags();
635
header.detectFlags = 0;
636
if (drawEngine->EverUsedExactEqualDepth())
637
header.detectFlags |= (uint32_t)VulkanCacheDetectFlags::EQUAL_DEPTH;
638
header.numVertexShaders = (int)vsCache_.size();
639
header.numFragmentShaders = (int)fsCache_.size();
640
header.numGeometryShaders = (int)gsCache_.size();
641
bool writeFailed = fwrite(&header, sizeof(header), 1, f) != 1;
642
vsCache_.Iterate([&](const VShaderID &id, VulkanVertexShader *vs) {
643
writeFailed = writeFailed || fwrite(&id, sizeof(id), 1, f) != 1;
644
});
645
fsCache_.Iterate([&](const FShaderID &id, VulkanFragmentShader *fs) {
646
writeFailed = writeFailed || fwrite(&id, sizeof(id), 1, f) != 1;
647
});
648
gsCache_.Iterate([&](const GShaderID &id, VulkanGeometryShader *gs) {
649
writeFailed = writeFailed || fwrite(&id, sizeof(id), 1, f) != 1;
650
});
651
if (writeFailed) {
652
ERROR_LOG(Log::G3D, "Failed to write Vulkan shader cache, disk full?");
653
} else {
654
NOTICE_LOG(Log::G3D, "Saved %d vertex and %d fragment shaders", header.numVertexShaders, header.numFragmentShaders);
655
}
656
}
657
658