Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
hrydgard
GitHub Repository: hrydgard/ppsspp
Path: blob/master/GPU/Vulkan/TextureCacheVulkan.cpp
5654 views
1
// Copyright (c) 2012- PPSSPP Project.
2
3
// This program is free software: you can redistribute it and/or modify
4
// it under the terms of the GNU General Public License as published by
5
// the Free Software Foundation, version 2.0 or later versions.
6
7
// This program is distributed in the hope that it will be useful,
8
// but WITHOUT ANY WARRANTY; without even the implied warranty of
9
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
// GNU General Public License 2.0 for more details.
11
12
// A copy of the GPL 2.0 should have been included with the program.
13
// If not, see http://www.gnu.org/licenses/
14
15
// Official git repository and contact information can be found at
16
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17
18
#include <algorithm>
19
#include <cstring>
20
21
#include "ext/xxhash.h"
22
23
#include "Common/File/VFS/VFS.h"
24
#include "Common/Data/Text/I18n.h"
25
#include "Common/LogReporting.h"
26
#include "Common/Math/math_util.h"
27
#include "Common/Profiler/Profiler.h"
28
#include "Common/GPU/thin3d.h"
29
#include "Common/GPU/Vulkan/VulkanRenderManager.h"
30
#include "Common/System/OSD.h"
31
#include "Common/StringUtils.h"
32
#include "Common/TimeUtil.h"
33
#include "Common/GPU/Vulkan/VulkanContext.h"
34
#include "Common/GPU/Vulkan/VulkanImage.h"
35
#include "Common/GPU/Vulkan/VulkanMemory.h"
36
37
#include "Core/Config.h"
38
39
#include "GPU/ge_constants.h"
40
#include "GPU/GPUState.h"
41
#include "GPU/GPUDefinitions.h"
42
#include "GPU/Common/TextureShaderCommon.h"
43
#include "GPU/Common/PostShader.h"
44
#include "GPU/Common/TextureCacheCommon.h"
45
#include "GPU/Common/TextureDecoder.h"
46
#include "GPU/Vulkan/VulkanContext.h"
47
#include "GPU/Vulkan/TextureCacheVulkan.h"
48
#include "GPU/Vulkan/FramebufferManagerVulkan.h"
49
#include "GPU/Vulkan/ShaderManagerVulkan.h"
50
#include "GPU/Vulkan/DrawEngineVulkan.h"
51
52
using namespace PPSSPP_VK;

// Texture cache memory slab sizing. The slab grows between the min and max
// size; TEXCACHE_SLAB_PRESSURE tunes how aggressively we react to memory pressure.
#define TEXCACHE_MIN_SLAB_SIZE (8 * 1024 * 1024)
#define TEXCACHE_MAX_SLAB_SIZE (32 * 1024 * 1024)
#define TEXCACHE_SLAB_PRESSURE 4
57
58
const char *uploadShader = R"(
59
#version 450
60
#extension GL_ARB_separate_shader_objects : enable
61
62
// 8x8 is the most common compute shader workgroup size, and works great on all major
63
// hardware vendors.
64
layout (local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
65
66
uniform layout(set = 0, binding = 0, rgba8) writeonly image2D img;
67
68
layout(std430, set = 0, binding = 1) buffer Buf {
69
uint data[];
70
} buf;
71
72
layout(push_constant) uniform Params {
73
int width;
74
int height;
75
} params;
76
77
uint readColoru(uvec2 p) {
78
return buf.data[p.y * params.width + p.x];
79
}
80
81
vec4 readColorf(uvec2 p) {
82
// Unpack the color (we could look it up in a CLUT here if we wanted...)
83
// The imageStore repack is free.
84
return unpackUnorm4x8(readColoru(p));
85
}
86
87
void writeColorf(ivec2 p, vec4 c) {
88
imageStore(img, p, c);
89
}
90
91
%s
92
93
// Note that main runs once per INPUT pixel, unlike the old model.
94
void main() {
95
uvec2 xy = gl_GlobalInvocationID.xy;
96
// Kill off any out-of-image threads to avoid stray writes.
97
// Should only happen on the tiniest mipmaps as PSP textures are power-of-2,
98
// and we use a 8x8 workgroup size. Probably not really necessary.
99
if (xy.x >= params.width || xy.y >= params.height)
100
return;
101
// applyScaling will write the upscaled pixels, using writeColorf above.
102
// It's expected to write a square of scale*scale pixels, at the location xy*scale.
103
applyScaling(xy);
104
}
105
106
)";
107
108
// Bytes per pixel for the VkFormats this texture cache uploads.
// 32-bit RGBA is 4 bytes, the CLUT8 format is 1, and everything
// else we use here is one of the 16-bit formats (2 bytes).
static int VkFormatBytesPerPixel(VkFormat format) {
	if (format == VULKAN_8888_FORMAT)
		return 4;
	if (format == VULKAN_CLUT8_FORMAT)
		return 1;
	return 2;
}
116
117
// Destructor: reuse DeviceLost to queue-delete all cached samplers and clear the cache.
SamplerCache::~SamplerCache() {
	DeviceLost();
}
120
121
// Returns a VkSampler matching the given key, creating and caching one on miss.
// Samplers are immutable in Vulkan so they can be shared freely between textures.
VkSampler SamplerCache::GetOrCreateSampler(const SamplerCacheKey &key) {
	VkSampler sampler;
	if (cache_.Get(key, &sampler)) {
		// Cache hit - reuse the existing sampler.
		return sampler;
	}

	// Cache miss - translate the key's fields into a VkSamplerCreateInfo.
	VkSamplerCreateInfo samp = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
	samp.addressModeU = key.sClamp ? VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE : VK_SAMPLER_ADDRESS_MODE_REPEAT;
	samp.addressModeV = key.tClamp ? VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE : VK_SAMPLER_ADDRESS_MODE_REPEAT;
	// W addressing is irrelevant for 2d textures, but Mali recommends that all clamp modes are the same if possible so just copy from U.
	samp.addressModeW = key.texture3d ? VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE : samp.addressModeU;
	samp.compareOp = VK_COMPARE_OP_ALWAYS;
	samp.flags = 0;
	samp.magFilter = key.magFilt ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
	samp.minFilter = key.minFilt ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
	samp.mipmapMode = key.mipFilt ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST;

	if (key.aniso) {
		// Docs say the min of this value and the supported max are used.
		samp.maxAnisotropy = 1 << g_Config.iAnisotropyLevel;
		samp.anisotropyEnable = true;
	} else {
		samp.maxAnisotropy = 1.0f;
		samp.anisotropyEnable = false;
	}
	// maxLevel is stored in 24.8 fixed point; 9*256 means "all levels".
	if (key.maxLevel == 9 * 256) {
		// No max level needed. Better for performance on some archs like ARM Mali.
		samp.maxLod = VK_LOD_CLAMP_NONE;
	} else {
		samp.maxLod = (float)(int32_t)key.maxLevel * (1.0f / 256.0f);
	}
	// minLevel and lodBias are also 24.8 fixed point.
	samp.minLod = (float)(int32_t)key.minLevel * (1.0f / 256.0f);
	samp.mipLodBias = (float)(int32_t)key.lodBias * (1.0f / 256.0f);

	VkResult res = vkCreateSampler(vulkan_->GetDevice(), &samp, nullptr, &sampler);
	_assert_(res == VK_SUCCESS);
	cache_.Insert(key, sampler);
	return sampler;
}
160
161
std::string SamplerCache::DebugGetSamplerString(const std::string &id, DebugShaderStringType stringType) {
162
SamplerCacheKey key;
163
key.FromString(id);
164
return StringFromFormat("%s/%s mag:%s min:%s mip:%s maxLod:%f minLod:%f bias:%f",
165
key.sClamp ? "Clamp" : "Wrap",
166
key.tClamp ? "Clamp" : "Wrap",
167
key.magFilt ? "Linear" : "Nearest",
168
key.minFilt ? "Linear" : "Nearest",
169
key.mipFilt ? "Linear" : "Nearest",
170
key.maxLevel / 256.0f,
171
key.minLevel / 256.0f,
172
key.lodBias / 256.0f);
173
}
174
175
// Queues every cached sampler for deferred deletion, empties the cache and
// drops the context pointer. Safe to call again (cache is empty after the first call).
void SamplerCache::DeviceLost() {
	cache_.Iterate([&](const SamplerCacheKey &key, VkSampler sampler) {
		// Deferred deletion - the sampler may still be referenced by in-flight frames.
		vulkan_->Delete().QueueDeleteSampler(sampler);
	});
	cache_.Clear();
	vulkan_ = nullptr;
}
182
183
// Re-attaches the Vulkan context after a device loss. Samplers are recreated
// lazily by GetOrCreateSampler, so nothing else to do here.
void SamplerCache::DeviceRestore(VulkanContext *vulkan) {
	vulkan_ = vulkan;
}
186
187
std::vector<std::string> SamplerCache::DebugGetSamplerIDs() const {
188
std::vector<std::string> ids;
189
cache_.Iterate([&](const SamplerCacheKey &id, VkSampler sampler) {
190
std::string idstr;
191
id.ToString(&idstr);
192
ids.push_back(idstr);
193
});
194
return ids;
195
}
196
197
// Constructor: wires up the compute shader manager and sampler cache with the
// Vulkan context, then performs the shared DeviceRestore setup.
TextureCacheVulkan::TextureCacheVulkan(Draw::DrawContext *draw, Draw2D *draw2D, VulkanContext *vulkan)
	: TextureCacheCommon(draw, draw2D),
		computeShaderManager_(vulkan),
		samplerCache_(vulkan) {
	DeviceRestore(draw);
}
203
204
// Destructor: DeviceLost queues all GPU objects for deferred deletion.
TextureCacheVulkan::~TextureCacheVulkan() {
	DeviceLost();
}
207
208
// Stores the framebuffer manager (non-owning) used for render-to-texture interop.
void TextureCacheVulkan::SetFramebufferManager(FramebufferManagerVulkan *fbManager) {
	framebufferManager_ = fbManager;
}
211
212
// Releases all GPU resources on device loss: texture shader cache, the texture
// cache itself, samplers and the upscaling compute shader. Afterwards draw_ is
// null until DeviceRestore is called.
void TextureCacheVulkan::DeviceLost() {
	textureShaderCache_->DeviceLost();

	// draw_ can already be null here; vulkan will then be null too.
	VulkanContext *vulkan = draw_ ? (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT) : nullptr;

	Clear(true);

	samplerCache_.DeviceLost();
	// NOTE(review): these QueueDelete* calls presumably reset the handle through
	// the reference they take - confirm, otherwise samplerNearest_/uploadCS_ would
	// keep stale values across the restore.
	if (samplerNearest_)
		vulkan->Delete().QueueDeleteSampler(samplerNearest_);

	if (uploadCS_ != VK_NULL_HANDLE)
		vulkan->Delete().QueueDeleteShaderModule(uploadCS_);

	computeShaderManager_.DeviceLost();

	nextTexture_ = nullptr;
	draw_ = nullptr;
	Unbind();
}
232
233
// Recreates GPU resources after device loss (also called from the constructor):
// restores the sampler and shader caches, creates the fallback nearest-neighbor
// sampler, and recompiles the texture scaling compute shader if enabled.
void TextureCacheVulkan::DeviceRestore(Draw::DrawContext *draw) {
	draw_ = draw;

	VulkanContext *vulkan = (VulkanContext *)draw->GetNativeObject(Draw::NativeObject::CONTEXT);
	_assert_(vulkan);

	samplerCache_.DeviceRestore(vulkan);
	textureShaderCache_->DeviceRestore(draw);

	// Plain nearest/repeat sampler, used where filtering must be off (e.g. debug readback paths).
	VkSamplerCreateInfo samp{ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
	samp.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
	samp.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
	samp.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
	samp.magFilter = VK_FILTER_NEAREST;
	samp.minFilter = VK_FILTER_NEAREST;
	samp.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
	VkResult res = vkCreateSampler(vulkan->GetDevice(), &samp, nullptr, &samplerNearest_);
	_assert_(res == VK_SUCCESS);

	CompileScalingShader();

	computeShaderManager_.DeviceRestore(draw);
}
256
257
// Reacts to settings changes: the scaling shader may need to be (re)compiled
// when the hardware scaling option or the selected shader changed.
void TextureCacheVulkan::NotifyConfigChanged() {
	TextureCacheCommon::NotifyConfigChanged();
	CompileScalingShader();
}
261
262
static std::string ReadShaderSrc(const Path &filename) {
263
size_t sz = 0;
264
char *data = (char *)g_VFS.ReadFile(filename.c_str(), &sz);
265
if (!data)
266
return std::string();
267
268
std::string src(data, sz);
269
delete[] data;
270
return src;
271
}
272
273
// (Re)compiles the hardware texture upscaling compute shader according to the
// current config. Deletes the old module when the feature is disabled or the
// selected shader changed; keeps the existing module if nothing changed.
void TextureCacheVulkan::CompileScalingShader() {
	if (!draw_) {
		// Something is very wrong.
		return;
	}

	VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);

	if (!g_Config.bTexHardwareScaling || g_Config.sTextureShaderName != textureShader_) {
		// Feature off or shader selection changed: drop the old module and state.
		// NOTE(review): QueueDeleteShaderModule presumably nulls uploadCS_ through
		// its reference parameter - confirm.
		if (uploadCS_ != VK_NULL_HANDLE)
			vulkan->Delete().QueueDeleteShaderModule(uploadCS_);
		textureShader_.clear();
		shaderScaleFactor_ = 0;  // no texture scaling shader
	} else if (uploadCS_) {
		// No need to recreate.
		return;
	}

	if (!g_Config.bTexHardwareScaling)
		return;

	// Look up the selected shader's compute source file.
	ReloadAllPostShaderInfo(draw_);
	const TextureShaderInfo *shaderInfo = GetTextureShaderInfo(g_Config.sTextureShaderName);
	if (!shaderInfo || shaderInfo->computeShaderFile.empty())
		return;

	// Splice the user shader into the uploadShader template (the %s placeholder).
	std::string shaderSource = ReadShaderSrc(shaderInfo->computeShaderFile);
	std::string fullUploadShader = StringFromFormat(uploadShader, shaderSource.c_str());

	std::string error;
	uploadCS_ = CompileShaderModule(vulkan, VK_SHADER_STAGE_COMPUTE_BIT, fullUploadShader.c_str(), &error);
	_dbg_assert_msg_(uploadCS_ != VK_NULL_HANDLE, "failed to compile upload shader");

	textureShader_ = g_Config.sTextureShaderName;
	shaderScaleFactor_ = shaderInfo->scaleFactor;
}
309
310
// Frees the Vulkan texture behind a cache entry. delete_them is unused on this
// backend - VulkanTexture's destructor handles the deferred GPU-side deletion.
void TextureCacheVulkan::ReleaseTexture(TexCacheEntry *entry, bool delete_them) {
	delete entry->vkTex;
	entry->vkTex = nullptr;
}
314
315
// Maps a PSP palette (CLUT) format to the VkFormat we decode it into.
VkFormat getClutDestFormatVulkan(GEPaletteFormat format) {
	if (format == GE_CMODE_16BIT_ABGR4444)
		return VULKAN_4444_FORMAT;
	if (format == GE_CMODE_16BIT_ABGR5551)
		return VULKAN_1555_FORMAT;
	if (format == GE_CMODE_16BIT_BGR5650)
		return VULKAN_565_FORMAT;
	if (format == GE_CMODE_32BIT_ABGR8888)
		return VULKAN_8888_FORMAT;
	return VK_FORMAT_UNDEFINED;
}
328
329
// Lookup table from a 0/1 filter flag to the corresponding VkFilter.
// NOTE(review): not referenced in this part of the file - verify it's still used.
static const VkFilter MagFiltVK[2] = {
	VK_FILTER_NEAREST,
	VK_FILTER_LINEAR
};
333
334
// Per-frame bookkeeping: forwards to the common cache and rotates the compute
// shader manager's per-frame resources.
void TextureCacheVulkan::StartFrame() {
	TextureCacheCommon::StartFrame();
	// TODO: For low memory detection, maybe use some indication from VMA.
	// Maybe see https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/staying_within_budget.html#staying_within_budget_querying_for_budget .
	computeShaderManager_.BeginFrame();
}
340
341
// Rehashes the currently loaded CLUT (palette) and detects the "alpha ramp"
// special case used by many font textures.
void TextureCacheVulkan::UpdateCurrentClut(GEPaletteFormat clutFormat, u32 clutBase, bool clutIndexIsSimple) {
	// clutBase is in entries; convert to bytes according to entry size.
	const u32 clutBaseBytes = clutFormat == GE_CMODE_32BIT_ABGR8888 ? (clutBase * sizeof(u32)) : (clutBase * sizeof(u16));
	// Technically, these extra bytes weren't loaded, but hopefully it was loaded earlier.
	// If not, we're going to hash random data, which hopefully doesn't cause a performance issue.
	//
	// TODO: Actually, this seems like a hack. The game can upload part of a CLUT and reference other data.
	// clutTotalBytes_ is the last amount uploaded. We should hash clutMaxBytes_, but this will often hash
	// unrelated old entries for small palettes.
	// Adding clutBaseBytes may just be mitigating this for some usage patterns.
	const u32 clutExtendedBytes = std::min(clutTotalBytes_ + clutBaseBytes, clutMaxBytes_);

	// The replacer relies on the legacy XXH32 hash for stable IDs; otherwise use the faster XXH3.
	if (replacer_.Enabled())
		clutHash_ = XXH32((const char *)clutBufRaw_, clutExtendedBytes, 0xC0108888);
	else
		clutHash_ = XXH3_64bits((const char *)clutBufRaw_, clutExtendedBytes) & 0xFFFFFFFF;
	clutBuf_ = clutBufRaw_;

	// Special optimization: fonts typically draw clut4 with just alpha values in a single color.
	clutAlphaLinear_ = false;
	clutAlphaLinearColor_ = 0;
	if (clutFormat == GE_CMODE_16BIT_ABGR4444 && clutIndexIsSimple) {
		const u16_le *clut = GetCurrentClut<u16_le>();
		clutAlphaLinear_ = true;
		clutAlphaLinearColor_ = clut[15] & 0x0FFF;
		// Verify every entry is the same RGB with alpha == index (a linear alpha ramp).
		for (int i = 0; i < 16; ++i) {
			u16 step = clutAlphaLinearColor_ | (i << 12);
			if (clut[i] != step) {
				clutAlphaLinear_ = false;
				break;
			}
		}
	}

	clutLastFormat_ = gstate.clutformat;
}
376
377
// Selects the image view and sampler for the given cache entry and disables
// any depal state. A null/invalid entry unbinds instead.
void TextureCacheVulkan::BindTexture(TexCacheEntry *entry) {
	if (!entry || !entry->vkTex) {
		Unbind();
		return;
	}

	// STATUS_NO_MIPS forces sampling from the base level only.
	int maxLevel = (entry->status & TexCacheEntry::STATUS_NO_MIPS) ? 0 : entry->maxLevel;
	SamplerCacheKey samplerKey = GetSamplingParams(maxLevel, entry);
	curSampler_ = samplerCache_.GetOrCreateSampler(samplerKey);
	imageView_ = entry->vkTex->GetImageView();
	// Regular texture bind: make sure shader depal is off.
	drawEngine_->SetDepalTexture(VK_NULL_HANDLE, false);
	gstate_c.SetUseShaderDepal(ShaderDepalMode::OFF);
}
390
391
// Switches the current sampler without changing the bound image view.
void TextureCacheVulkan::ApplySamplingParams(const SamplerCacheKey &key) {
	curSampler_ = samplerCache_.GetOrCreateSampler(key);
}
394
395
// Clears the current texture binding (image view and sampler).
void TextureCacheVulkan::Unbind() {
	imageView_ = VK_NULL_HANDLE;
	curSampler_ = VK_NULL_HANDLE;
}
399
400
// Hands the given texture to the draw engine as the depal CLUT texture.
// smooth selects filtered vs. nearest sampling of the palette.
void TextureCacheVulkan::BindAsClutTexture(Draw::Texture *tex, bool smooth) {
	VkImageView clutTexture = (VkImageView)draw_->GetNativeObject(Draw::NativeObject::TEXTURE_VIEW, tex);
	drawEngine_->SetDepalTexture(clutTexture, smooth);
}
404
405
// Converts a VkFormat back to the generic Draw::DataFormat. Only the 8888 case
// is needed here (used when saving textures, which always go out as RGBA8888).
static Draw::DataFormat FromVulkanFormat(VkFormat fmt) {
	switch (fmt) {
	case VULKAN_8888_FORMAT: default: return Draw::DataFormat::R8G8B8A8_UNORM;
	}
}
410
411
// Converts a generic Draw::DataFormat (from the texture replacement system)
// to the matching VkFormat. Covers the block-compressed formats replacements
// may use, plus RGBA8888; anything else asserts.
static VkFormat ToVulkanFormat(Draw::DataFormat fmt) {
	switch (fmt) {
	case Draw::DataFormat::BC1_RGBA_UNORM_BLOCK: return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
	case Draw::DataFormat::BC2_UNORM_BLOCK: return VK_FORMAT_BC2_UNORM_BLOCK;
	case Draw::DataFormat::BC3_UNORM_BLOCK: return VK_FORMAT_BC3_UNORM_BLOCK;
	case Draw::DataFormat::BC4_UNORM_BLOCK: return VK_FORMAT_BC4_UNORM_BLOCK;
	case Draw::DataFormat::BC5_UNORM_BLOCK: return VK_FORMAT_BC5_UNORM_BLOCK;
	case Draw::DataFormat::BC7_UNORM_BLOCK: return VK_FORMAT_BC7_UNORM_BLOCK;
	case Draw::DataFormat::ASTC_4x4_UNORM_BLOCK: return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
	case Draw::DataFormat::ETC2_R8G8B8_UNORM_BLOCK: return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
	case Draw::DataFormat::ETC2_R8G8B8A1_UNORM_BLOCK: return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
	case Draw::DataFormat::ETC2_R8G8B8A8_UNORM_BLOCK: return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;

	case Draw::DataFormat::R8G8B8A8_UNORM: return VULKAN_8888_FORMAT;
	default: _assert_msg_(false, "Bad texture pixel format"); return VULKAN_8888_FORMAT;
	}
}
428
429
// Builds (or rebuilds) the Vulkan texture for a cache entry: plans the build,
// picks formats, allocates the VkImage, uploads all mip levels (via transfer
// copies or the upscaling compute shader), optionally generates extra mips,
// and finalizes the image layout. Falls back to low-memory mode on allocation
// failure.
void TextureCacheVulkan::BuildTexture(TexCacheEntry *const entry) {
	VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);

	BuildTexturePlan plan;
	plan.hardwareScaling = g_Config.bTexHardwareScaling && uploadCS_ != VK_NULL_HANDLE;
	plan.slowScaler = !plan.hardwareScaling || vulkan->DevicePerfClass() == PerfClass::SLOW;
	if (!PrepareBuildTexture(plan, entry)) {
		// We're screwed (invalid size or something, corrupt display list), let's just zap it.
		if (entry->vkTex) {
			delete entry->vkTex;
			entry->vkTex = nullptr;
		}
		return;
	}

	VkFormat dstFmt = GetDestFormat(GETextureFormat(entry->format), gstate.getClutPaletteFormat());

	if (plan.scaleFactor > 1) {
		_dbg_assert_(!plan.doReplace);
		// Whether hardware or software scaling, this is the dest format.
		dstFmt = VULKAN_8888_FORMAT;
	} else if (plan.decodeToClut8) {
		dstFmt = VULKAN_CLUT8_FORMAT;
	}

	_dbg_assert_(plan.levelsToLoad <= plan.maxPossibleLevels);

	// We don't generate mipmaps for 512x512 textures because they're almost exclusively used for menu backgrounds
	// and similar, which don't really need it.
	// Also, if using replacements, check that we really can generate mips for this format - that's not possible for compressed ones.
	if (g_Config.iTexFiltering == TEX_FILTER_AUTO_MAX_QUALITY && plan.w <= 256 && plan.h <= 256 && (!plan.doReplace || plan.replaced->Format() == Draw::DataFormat::R8G8B8A8_UNORM)) {
		// Boost the number of mipmaps.
		if (plan.maxPossibleLevels > plan.levelsToCreate) { // TODO: Should check against levelsToLoad, no?
			// We have to generate mips with a shader. This requires decoding to R8G8B8A8_UNORM format to avoid extra complications.
			dstFmt = VULKAN_8888_FORMAT;
		}
		plan.levelsToCreate = plan.maxPossibleLevels;
	}

	_dbg_assert_(plan.levelsToCreate >= plan.levelsToLoad);

	// Any texture scaling is gonna move away from the original 16-bit format, if any.
	VkFormat actualFmt = plan.scaleFactor > 1 ? VULKAN_8888_FORMAT : dstFmt;
	bool bcFormat = false;
	int bcAlign = 0;
	if (plan.doReplace) {
		// Replacement textures dictate the format (possibly block-compressed).
		Draw::DataFormat fmt = plan.replaced->Format();
		bcFormat = Draw::DataFormatIsBlockCompressed(fmt, &bcAlign);
		actualFmt = ToVulkanFormat(fmt);
	}

	bool computeUpload = false;
	VkCommandBuffer cmdInit = (VkCommandBuffer)draw_->GetNativeObject(Draw::NativeObject::INIT_COMMANDBUFFER);

	delete entry->vkTex;

	VkImageLayout imageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
	VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

	// Hardware scaling writes via a compute shader, which requires storage usage + GENERAL layout.
	if (actualFmt == VULKAN_8888_FORMAT && plan.scaleFactor > 1 && plan.hardwareScaling) {
		if (uploadCS_ != VK_NULL_HANDLE) {
			computeUpload = true;
		} else {
			WARN_LOG(Log::G3D, "Falling back to software scaling, hardware shader didn't compile");
		}
	}

	if (computeUpload) {
		usage |= VK_IMAGE_USAGE_STORAGE_BIT;
		imageLayout = VK_IMAGE_LAYOUT_GENERAL;
	}

	if (plan.saveTexture) {
		DEBUG_LOG(Log::G3D, "About to save texture (%dx%d) (might not, if it already exists)", plan.createW, plan.createH);
		actualFmt = VULKAN_8888_FORMAT;
	}

	// Select the component swizzle matching the 16-bit packing, if any.
	const VkComponentMapping *mapping;
	switch (actualFmt) {
	case VULKAN_4444_FORMAT: mapping = &VULKAN_4444_SWIZZLE; break;
	case VULKAN_1555_FORMAT: mapping = &VULKAN_1555_SWIZZLE; break;
	case VULKAN_565_FORMAT: mapping = &VULKAN_565_SWIZZLE; break;
	default: mapping = &VULKAN_8888_SWIZZLE; break; // no channel swizzle
	}

	// Debug name for the image, visible in graphics debuggers.
	char texName[64];
	snprintf(texName, sizeof(texName), "tex_%08x_%s_%s", entry->addr, GeTextureFormatToString((GETextureFormat)entry->format, gstate.getClutPaletteFormat()), gstate.isTextureSwizzled() ? "swz" : "lin");
	entry->vkTex = new VulkanTexture(vulkan, texName);
	VulkanTexture *image = entry->vkTex;

	VulkanBarrierBatch barrier;
	bool allocSuccess = image->CreateDirect(plan.createW, plan.createH, plan.depth, plan.levelsToCreate, actualFmt, imageLayout, usage, &barrier, mapping);
	barrier.Flush(cmdInit);
	if (!allocSuccess && !lowMemoryMode_) {
		WARN_LOG_REPORT(Log::G3D, "Texture cache ran out of GPU memory; switching to low memory mode");
		lowMemoryMode_ = true;
		decimationCounter_ = 0;
		Decimate(entry, true);

		// TODO: We should stall the GPU here and wipe things out of memory.
		// As is, it will almost definitely fail the second time, but next frame it may recover.

		auto err = GetI18NCategory(I18NCat::ERRORS);
		if (plan.scaleFactor > 1) {
			g_OSD.Show(OSDType::MESSAGE_WARNING, err->T("Warning: Video memory FULL, reducing upscaling and switching to slow caching mode"), 2.0f);
		} else {
			g_OSD.Show(OSDType::MESSAGE_WARNING, err->T("Warning: Video memory FULL, switching to slow caching mode"), 2.0f);
		}

		// Turn off texture replacement for this texture.
		plan.replaced = nullptr;

		// Retry the allocation at the unscaled size with minimal usage flags.
		plan.createW /= plan.scaleFactor;
		plan.createH /= plan.scaleFactor;
		plan.scaleFactor = 1;
		actualFmt = dstFmt;

		allocSuccess = image->CreateDirect(plan.createW, plan.createH, plan.depth, plan.levelsToCreate, actualFmt, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, &barrier, mapping);
		barrier.Flush(cmdInit);
	}

	if (!allocSuccess) {
		ERROR_LOG(Log::G3D, "Failed to create texture (%dx%d)", plan.w, plan.h);
		delete entry->vkTex;
		entry->vkTex = nullptr;
	}

	if (!entry->vkTex) {
		return;
	}

	VK_PROFILE_BEGIN(vulkan, cmdInit, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
		"Texture Upload (%08x) video=%d", entry->addr, plan.isVideo);

	// Upload the texture data. We simply reuse the same loop for 3D texture slices instead of mips, if we have those.
	int levels;
	if (plan.depth > 1) {
		levels = plan.depth;
	} else {
		levels = plan.levelsToLoad;
	}

	VulkanPushPool *pushBuffer = drawEngine_->GetPushBufferForTextureData();

	// Batch the copies.
	TextureCopyBatch copyBatch;
	copyBatch.reserve(levels);

	for (int i = 0; i < levels; i++) {
		int mipUnscaledWidth = gstate.getTextureWidth(i);
		int mipUnscaledHeight = gstate.getTextureHeight(i);

		int mipWidth;
		int mipHeight;
		plan.GetMipSize(i, &mipWidth, &mipHeight);

		int bpp = VkFormatBytesPerPixel(actualFmt);
		// RoundToNextPowerOf2 is probably not necessary as the optimal alignment is gonna be a power of 2.
		int optimalStrideAlignment = RoundToNextPowerOf2(std::max(4, (int)vulkan->GetPhysicalDeviceProperties().properties.limits.optimalBufferCopyRowPitchAlignment));
		int byteStride = RoundUpToMultipleOf(mipWidth * bpp, optimalStrideAlignment); // output stride
		int pixelStride = byteStride / bpp;
		int uploadSize = byteStride * mipHeight;

		uint32_t bufferOffset;
		VkBuffer texBuf;
		// NVIDIA reports a min alignment of 1 but that can't be healthy... let's align by 16 as a minimum.
		int pushAlignment = std::max(16, (int)vulkan->GetPhysicalDeviceProperties().properties.limits.optimalBufferCopyOffsetAlignment);
		void *data;
		std::vector<uint8_t> saveData;

		// Simple wrapper to avoid reading back from VRAM (very, very expensive).
		// When saving, decode into CPU memory first, then push; otherwise decode
		// straight into the push buffer.
		auto loadLevel = [&](int sz, int srcLevel, int lstride, int lfactor) {
			if (plan.saveTexture) {
				saveData.resize(sz);
				data = &saveData[0];
			} else {
				data = pushBuffer->Allocate(sz, pushAlignment, &texBuf, &bufferOffset);
			}
			LoadVulkanTextureLevel(*entry, (uint8_t *)data, lstride, srcLevel, lfactor, actualFmt);
			if (plan.saveTexture)
				bufferOffset = pushBuffer->Push(&saveData[0], sz, pushAlignment, &texBuf);
		};

		bool dataScaled = true;
		int srcStride = byteStride;
		if (plan.doReplace) {
			int rowLength = pixelStride;
			if (bcFormat) {
				// For block compressed formats, we just set the upload size to the data size..
				uploadSize = plan.replaced->GetLevelDataSizeAfterCopy(plan.baseLevelSrc + i);
				rowLength = (mipWidth + 3) & ~3;
			}
			// Directly load the replaced image.
			data = pushBuffer->Allocate(uploadSize, pushAlignment, &texBuf, &bufferOffset);
			double replaceStart = time_now_d();
			if (!plan.replaced->CopyLevelTo(plan.baseLevelSrc + i, (uint8_t *)data, uploadSize, byteStride)) { // If plan.doReplace, this shouldn't fail.
				WARN_LOG(Log::G3D, "Failed to copy replaced texture level");
				// TODO: Fill with some pattern?
			}
			replacementTimeThisFrame_ += time_now_d() - replaceStart;
			entry->vkTex->CopyBufferToMipLevel(cmdInit, &copyBatch, i, mipWidth, mipHeight, 0, texBuf, bufferOffset, rowLength);
		} else {
			if (plan.depth != 1) {
				// 3D texturing.
				loadLevel(uploadSize, i, byteStride, plan.scaleFactor);
				entry->vkTex->CopyBufferToMipLevel(cmdInit, &copyBatch, 0, mipWidth, mipHeight, i, texBuf, bufferOffset, pixelStride);
			} else if (computeUpload) {
				// Load the unscaled data, then dispatch the upscaling compute shader
				// which writes directly to the mip level's storage image view.
				int srcBpp = VkFormatBytesPerPixel(dstFmt);
				srcStride = mipUnscaledWidth * srcBpp;
				int srcSize = srcStride * mipUnscaledHeight;
				loadLevel(srcSize, i == 0 ? plan.baseLevelSrc : i, srcStride, 1);
				dataScaled = false;

				// This format can be used with storage images.
				VkImageView view = entry->vkTex->CreateViewForMip(i);
				VkDescriptorSet descSet = computeShaderManager_.GetDescriptorSet(view, texBuf, bufferOffset, srcSize);
				struct Params { int x; int y; } params{ mipUnscaledWidth, mipUnscaledHeight };
				VK_PROFILE_BEGIN(vulkan, cmdInit, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
					"Compute Upload: %dx%d->%dx%d", mipUnscaledWidth, mipUnscaledHeight, mipWidth, mipHeight);
				vkCmdBindPipeline(cmdInit, VK_PIPELINE_BIND_POINT_COMPUTE, computeShaderManager_.GetPipeline(uploadCS_));
				vkCmdBindDescriptorSets(cmdInit, VK_PIPELINE_BIND_POINT_COMPUTE, computeShaderManager_.GetPipelineLayout(), 0, 1, &descSet, 0, nullptr);
				vkCmdPushConstants(cmdInit, computeShaderManager_.GetPipelineLayout(), VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(params), &params);
				// One thread per INPUT pixel, 8x8 workgroups (see uploadShader).
				vkCmdDispatch(cmdInit, (mipUnscaledWidth + 7) / 8, (mipUnscaledHeight + 7) / 8, 1);
				VK_PROFILE_END(vulkan, cmdInit, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
				vulkan->Delete().QueueDeleteImageView(view);
			} else {
				loadLevel(uploadSize, i == 0 ? plan.baseLevelSrc : i, byteStride, plan.scaleFactor);
				entry->vkTex->CopyBufferToMipLevel(cmdInit, &copyBatch, i, mipWidth, mipHeight, 0, texBuf, bufferOffset, pixelStride);
			}
			// Format might be wrong in lowMemoryMode_, so don't save.
			if (plan.saveTexture && !lowMemoryMode_) {
				// When hardware texture scaling is enabled, this saves the original.
				const int w = dataScaled ? mipWidth : mipUnscaledWidth;
				const int h = dataScaled ? mipHeight : mipUnscaledHeight;
				const int stride = dataScaled ? byteStride : srcStride;
				// At this point, data should be saveData, and not slow.
				ReplacedTextureDecodeInfo replacedInfo;
				replacedInfo.cachekey = entry->CacheKey();
				replacedInfo.hash = entry->fullhash;
				replacedInfo.addr = entry->addr;
				replacedInfo.isVideo = IsVideo(entry->addr);
				replacedInfo.isFinal = (entry->status & TexCacheEntry::STATUS_TO_SCALE) == 0;
				replacedInfo.fmt = FromVulkanFormat(actualFmt);
				replacer_.NotifyTextureDecoded(plan.replaced, replacedInfo, data, stride, plan.baseLevelSrc + i, mipUnscaledWidth, mipUnscaledHeight, w, h);
			}
		}
	}

	if (!copyBatch.empty()) {
		VK_PROFILE_BEGIN(vulkan, cmdInit, VK_PIPELINE_STAGE_TRANSFER_BIT, "Copy Upload");
		// Submit the whole batch of mip uploads.
		entry->vkTex->FinishCopyBatch(cmdInit, &copyBatch);
		VK_PROFILE_END(vulkan, cmdInit, VK_PIPELINE_STAGE_TRANSFER_BIT);
	}

	VkImageLayout layout = computeUpload ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
	VkPipelineStageFlags prevStage = computeUpload ? VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT : VK_PIPELINE_STAGE_TRANSFER_BIT;

	// Generate any additional mipmap levels.
	// This will transition the whole stack to GENERAL if it wasn't already.
	if (plan.levelsToLoad < plan.levelsToCreate) {
		VK_PROFILE_BEGIN(vulkan, cmdInit, VK_PIPELINE_STAGE_TRANSFER_BIT, "Mipgen up to level %d", plan.levelsToCreate);
		entry->vkTex->GenerateMips(cmdInit, plan.levelsToLoad, computeUpload);
		layout = VK_IMAGE_LAYOUT_GENERAL;
		prevStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
		VK_PROFILE_END(vulkan, cmdInit, VK_PIPELINE_STAGE_TRANSFER_BIT);
	}

	entry->vkTex->EndCreate(cmdInit, false, prevStage, layout);
	VK_PROFILE_END(vulkan, cmdInit, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);

	// Signal that we support depth textures so use it as one.
	if (plan.depth > 1) {
		entry->status |= TexCacheEntry::STATUS_3D;
	}

	if (plan.doReplace) {
		entry->SetAlphaStatus(TexCacheEntry::TexStatus(plan.replaced->AlphaStatus()));
	}
}
709
710
// Picks the VkFormat a PSP texture format decodes into. If 16-bit formats
// aren't usable on this device, everything decodes to RGBA8888.
VkFormat TextureCacheVulkan::GetDestFormat(GETextureFormat format, GEPaletteFormat clutFormat) {
	if (!gstate_c.Use(GPU_USE_16BIT_FORMATS)) {
		return VK_FORMAT_R8G8B8A8_UNORM;
	}
	switch (format) {
	case GE_TFMT_CLUT4:
	case GE_TFMT_CLUT8:
	case GE_TFMT_CLUT16:
	case GE_TFMT_CLUT32:
		// Paletted formats: the destination depends on the palette entry format.
		return getClutDestFormatVulkan(clutFormat);
	case GE_TFMT_4444:
		return VULKAN_4444_FORMAT;
	case GE_TFMT_5551:
		return VULKAN_1555_FORMAT;
	case GE_TFMT_5650:
		return VULKAN_565_FORMAT;
	case GE_TFMT_8888:
	case GE_TFMT_DXT1:
	case GE_TFMT_DXT3:
	case GE_TFMT_DXT5:
	default:
		// DXT is decompressed on the CPU, so it also lands in 8888.
		return VULKAN_8888_FORMAT;
	}
}
734
735
// Decodes one mip level of a PSP texture into writePtr (with the given
// rowPitch), optionally applying CPU texture upscaling by scaleFactor.
// dstFmt must be VULKAN_8888_FORMAT whenever scaleFactor > 1.
//
// Fixes vs. original: added the missing ';' after the _assert_msg_ statement,
// and removed the unused local `u32 fmt = dstFmt;` in the scaling branch.
void TextureCacheVulkan::LoadVulkanTextureLevel(TexCacheEntry &entry, uint8_t *writePtr, int rowPitch, int level, int scaleFactor, VkFormat dstFmt) {
	int w = gstate.getTextureWidth(level);
	int h = gstate.getTextureHeight(level);

	GETextureFormat tfmt = (GETextureFormat)entry.format;
	GEPaletteFormat clutformat = gstate.getClutPaletteFormat();
	u32 texaddr = gstate.getTextureAddress(level);

	_assert_msg_(texaddr != 0, "Can't load a texture from address null");

	int bufw = GetTextureBufw(level, texaddr, tfmt);
	int bpp = VkFormatBytesPerPixel(dstFmt);

	u32 *pixelData;
	int decPitch;

	TexDecodeFlags texDecFlags{};
	// Expand 16-bit source data to 32-bit when 16-bit formats are unusable,
	// when we're going to scale, or when the destination is already 8888.
	if (!gstate_c.Use(GPU_USE_16BIT_FORMATS) || scaleFactor > 1 || dstFmt == VULKAN_8888_FORMAT) {
		texDecFlags |= TexDecodeFlags::EXPAND32;
	}
	if (entry.status & TexCacheEntry::STATUS_CLUT_GPU) {
		texDecFlags |= TexDecodeFlags::TO_CLUT8;
	}

	if (scaleFactor > 1) {
		// Decode into a temporary buffer first; the scaler reads from it.
		tmpTexBufRearrange_.resize(std::max(bufw, w) * h);
		pixelData = tmpTexBufRearrange_.data();
		// We want to end up with a neatly packed texture for scaling.
		decPitch = w * bpp;
	} else {
		// Decode directly into the destination (push buffer) memory.
		pixelData = (u32 *)writePtr;
		decPitch = rowPitch;
	}

	CheckAlphaResult alphaResult = DecodeTextureLevel((u8 *)pixelData, decPitch, tfmt, clutformat, texaddr, level, bufw, texDecFlags);
	entry.SetAlphaStatus(alphaResult, level);

	if (scaleFactor > 1) {
		// CPU scaling reads from the destination buffer so we want cached RAM.
		size_t allocBytes = w * scaleFactor * h * scaleFactor * 4;
		uint8_t *scaleBuf = (uint8_t *)AllocateAlignedMemory(allocBytes, 16);
		_assert_msg_(scaleBuf, "Failed to allocate %d aligned bytes for texture scaler", (int)allocBytes);

		// ScaleAlways updates w/h to the scaled dimensions.
		scaler_.ScaleAlways((u32 *)scaleBuf, pixelData, w, h, &w, &h, scaleFactor);
		pixelData = (u32 *)writePtr;

		// We always end up at 8888. Other parts assume this.
		_assert_(dstFmt == VULKAN_8888_FORMAT);
		bpp = sizeof(u32);
		decPitch = w * bpp;

		// Copy the scaled result into the destination, row by row if the
		// pitches differ, in one go otherwise.
		if (decPitch != rowPitch) {
			for (int y = 0; y < h; ++y) {
				memcpy(writePtr + rowPitch * y, scaleBuf + decPitch * y, w * bpp);
			}
			decPitch = rowPitch;
		} else {
			memcpy(writePtr, scaleBuf, w * h * 4);
		}
		FreeAlignedMemory(scaleBuf);
	}
}
798
799
// Points the current image view at the framebuffer texture bound in slot 0 by
// the draw context (used when texturing from a rendered framebuffer).
void TextureCacheVulkan::BoundFramebufferTexture() {
	imageView_ = (VkImageView)draw_->GetNativeObject(Draw::NativeObject::BOUND_TEXTURE0_IMAGEVIEW);
}
802
803
// Debugger support: reads back the currently bound texture (or framebuffer
// texture) at the given mip level into buffer. Returns false if there's no
// usable texture or the level is out of range. Forces a GPU sync readback.
bool TextureCacheVulkan::GetCurrentTextureDebug(GPUDebugBuffer &buffer, int level, bool *isFramebuffer) {
	SetTexture();
	if (!nextTexture_) {
		// Texturing from a framebuffer - handled by the common path.
		return GetCurrentFramebufferTextureDebug(buffer, isFramebuffer);
	}

	// Apply texture may need to rebuild the texture if we're about to render, or bind a framebuffer.
	TexCacheEntry *entry = nextTexture_;
	ApplyTexture();

	if (!entry->vkTex)
		return false;

	VulkanTexture *texture = entry->vkTex;
	VulkanRenderManager *renderManager = (VulkanRenderManager *)draw_->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);

	// Map the Vulkan image format to the debug buffer/readback formats.
	GPUDebugBufferFormat bufferFormat;
	Draw::DataFormat drawFormat;
	switch (texture->GetFormat()) {
	case VULKAN_565_FORMAT:
		bufferFormat = GPU_DBG_FORMAT_565;
		drawFormat = Draw::DataFormat::B5G6R5_UNORM_PACK16;
		break;
	case VULKAN_1555_FORMAT:
		bufferFormat = GPU_DBG_FORMAT_5551;
		drawFormat = Draw::DataFormat::B5G5R5A1_UNORM_PACK16;
		break;
	case VULKAN_4444_FORMAT:
		bufferFormat = GPU_DBG_FORMAT_4444;
		drawFormat = Draw::DataFormat::B4G4R4A4_UNORM_PACK16;
		break;
	case VULKAN_8888_FORMAT:
	default:
		bufferFormat = GPU_DBG_FORMAT_8888;
		drawFormat = Draw::DataFormat::R8G8B8A8_UNORM;
		break;
	}

	int w = texture->GetWidth();
	int h = texture->GetHeight();
	if (level > 0) {
		// In the future, maybe this could do something for 3D textures...
		if (level >= texture->GetNumMips())
			return false;
		// Each mip halves the dimensions.
		w >>= level;
		h >>= level;
	}
	buffer.Allocate(w, h, bufferFormat);

	renderManager->CopyImageToMemorySync(texture->GetImage(), level, 0, 0, w, h, drawFormat, (uint8_t *)buffer.GetData(), w, "GetCurrentTextureDebug");

	// Vulkan requires us to re-apply all dynamic state for each command buffer, and the above will cause us to start a new cmdbuf.
	// So let's dirty the things that are involved in Vulkan dynamic state. Readbacks are not frequent so this won't hurt other backends.
	gstate_c.Dirty(DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_BLEND_STATE | DIRTY_DEPTHSTENCIL_STATE);
	framebufferManager_->RebindFramebuffer("RebindFramebuffer - GetCurrentTextureDebug");
	*isFramebuffer = false;
	return true;
}
861
862
// Debug statistics string; not implemented for the Vulkan backend.
void TextureCacheVulkan::GetStats(char *ptr, size_t size) {
	snprintf(ptr, size, "N/A");
}
865
866
// Forwards to the sampler cache's debug ID listing.
std::vector<std::string> TextureCacheVulkan::DebugGetSamplerIDs() const {
	return samplerCache_.DebugGetSamplerIDs();
}
869
870
// Forwards to the sampler cache's debug string formatting.
std::string TextureCacheVulkan::DebugGetSamplerString(const std::string &id, DebugShaderStringType stringType) {
	return samplerCache_.DebugGetSamplerString(id, stringType);
}
873
874
// Returns the raw VkImageView for a cache entry, either the flat 2D view or
// the array view. Caller must know entry->vkTex is valid.
void *TextureCacheVulkan::GetNativeTextureView(const TexCacheEntry *entry, bool flat) const {
	VkImageView view = flat ? entry->vkTex->GetImageView() : entry->vkTex->GetImageArrayView();
	return (void *)view;
}
878
879