GitHub Repository: hrydgard/ppsspp
Path: blob/master/Common/GPU/Vulkan/VulkanFrameData.cpp
#include <mutex>

#include "VulkanFrameData.h"
#include "Common/Log.h"
#include "Common/StringUtils.h"

#if 0 // def _DEBUG
#define VLOG(...) NOTICE_LOG(Log::G3D, __VA_ARGS__)
#else
#define VLOG(...)
#endif

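// Releases the cached readback's GPU buffer. The deletion is pushed onto VulkanContext's
// delete queue so any frames still in flight can finish using the buffer first.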
void CachedReadback::Destroy(VulkanContext *vulkan) {
	if (buffer) {
		vulkan->Delete().QueueDeleteBufferAllocation(buffer, allocation);
	}
	bufferSize = 0;
}

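// Creates the per-frame objects: the acquire/rendering-complete semaphores, the transient
// command pools with their init/main/present command buffers, the frame fence (created
// signaled), and the timestamp query pool used for GPU profiling.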
void FrameData::Init(VulkanContext *vulkan, int index) {
	this->index = index;
	VkDevice device = vulkan->GetDevice();

	VkSemaphoreCreateInfo semaphoreCreateInfo = { VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
	semaphoreCreateInfo.flags = 0;
	VkResult res = vkCreateSemaphore(vulkan->GetDevice(), &semaphoreCreateInfo, nullptr, &acquireSemaphore);
	_dbg_assert_(res == VK_SUCCESS);
	res = vkCreateSemaphore(vulkan->GetDevice(), &semaphoreCreateInfo, nullptr, &renderingCompleteSemaphore);
	_dbg_assert_(res == VK_SUCCESS);

	VkCommandPoolCreateInfo cmd_pool_info = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
	cmd_pool_info.queueFamilyIndex = vulkan->GetGraphicsQueueFamilyIndex();
	cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
	res = vkCreateCommandPool(device, &cmd_pool_info, nullptr, &cmdPoolInit);
	_dbg_assert_(res == VK_SUCCESS);
	res = vkCreateCommandPool(device, &cmd_pool_info, nullptr, &cmdPoolMain);
	_dbg_assert_(res == VK_SUCCESS);

	VkCommandBufferAllocateInfo cmd_alloc = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
	cmd_alloc.commandPool = cmdPoolInit;
	cmd_alloc.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
	cmd_alloc.commandBufferCount = 1;
	res = vkAllocateCommandBuffers(device, &cmd_alloc, &initCmd);
	_dbg_assert_(res == VK_SUCCESS);
	cmd_alloc.commandPool = cmdPoolMain;
	res = vkAllocateCommandBuffers(device, &cmd_alloc, &mainCmd);
	_dbg_assert_(res == VK_SUCCESS);
	res = vkAllocateCommandBuffers(device, &cmd_alloc, &presentCmd);
	_dbg_assert_(res == VK_SUCCESS);

	vulkan->SetDebugName(initCmd, VK_OBJECT_TYPE_COMMAND_BUFFER, StringFromFormat("initCmd%d", index).c_str());
	vulkan->SetDebugName(mainCmd, VK_OBJECT_TYPE_COMMAND_BUFFER, StringFromFormat("mainCmd%d", index).c_str());
	vulkan->SetDebugName(presentCmd, VK_OBJECT_TYPE_COMMAND_BUFFER, StringFromFormat("presentCmd%d", index).c_str());

	// Create the frame fence signaled (true) so it can be waited on immediately on the first frame.
	fence = vulkan->CreateFence(true);
	vulkan->SetDebugName(fence, VK_OBJECT_TYPE_FENCE, StringFromFormat("fence%d", index).c_str());
	readyForFence = true;

	VkQueryPoolCreateInfo query_ci{ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO };
	query_ci.queryCount = MAX_TIMESTAMP_QUERIES;
	query_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
	res = vkCreateQueryPool(device, &query_ci, nullptr, &profile.queryPool);
	_dbg_assert_(res == VK_SUCCESS);
}

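// Destroys everything Init() created, plus any cached readback buffers this frame still owns.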
void FrameData::Destroy(VulkanContext *vulkan) {
	VkDevice device = vulkan->GetDevice();
	vkDestroyCommandPool(device, cmdPoolInit, nullptr);
	vkDestroyCommandPool(device, cmdPoolMain, nullptr);
	vkDestroyFence(device, fence, nullptr);
	vkDestroyQueryPool(device, profile.queryPool, nullptr);
	vkDestroySemaphore(device, acquireSemaphore, nullptr);
	vkDestroySemaphore(device, renderingCompleteSemaphore, nullptr);

	readbacks_.IterateMut([=](const ReadbackKey &key, CachedReadback *value) {
		value->Destroy(vulkan);
		delete value;
	});
	readbacks_.Clear();
}

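// Acquires the next swapchain image; acquireSemaphore will be signaled once the image is
// actually ready to render to. Soft failures (OUT_OF_DATE, TIMEOUT, NOT_READY, SURFACE_LOST)
// don't abort the frame - we keep rendering but set skipSwap so nothing gets presented.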
void FrameData::AcquireNextImage(VulkanContext *vulkan) {
	_dbg_assert_(!hasAcquired);

	// Get the index of the next available swapchain image, and a semaphore to block command buffer execution on.
	VkResult res = vkAcquireNextImageKHR(vulkan->GetDevice(), vulkan->GetSwapchain(), UINT64_MAX, acquireSemaphore, (VkFence)VK_NULL_HANDLE, &curSwapchainImage);
	switch (res) {
	case VK_SUCCESS:
		hasAcquired = true;
		break;
	case VK_SUBOPTIMAL_KHR:
		hasAcquired = true;
		// Hopefully the resize will happen shortly. Ignore - one frame might look bad or something.
		WARN_LOG(Log::G3D, "VK_SUBOPTIMAL_KHR returned - ignoring");
		break;
	case VK_ERROR_OUT_OF_DATE_KHR:
	case VK_TIMEOUT:
	case VK_NOT_READY:
		// We do not set hasAcquired here!
		WARN_LOG(Log::G3D, "%s returned from AcquireNextImage - processing the frame, but not presenting", VulkanResultToString(res));
		skipSwap = true;
		break;
	case VK_ERROR_SURFACE_LOST_KHR:
		ERROR_LOG(Log::G3D, "%s returned from AcquireNextImage - ignoring, but this better be during shutdown", VulkanResultToString(res));
		skipSwap = true;
		break;
	default:
		// Weird, shouldn't get any other values. Maybe lost device?
		_assert_msg_(false, "vkAcquireNextImageKHR failed! result=%s", VulkanResultToString(res));
		break;
	}
}

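// Presents the acquired image, waiting on renderingCompleteSemaphore. When present-time
// measurement is enabled, VK_KHR_present_id or VK_GOOGLE_display_timing data is chained onto
// the present info, preferring the former if both are available.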
VkResult FrameData::QueuePresent(VulkanContext *vulkan, FrameDataShared &shared) {
	_dbg_assert_(hasAcquired);
	hasAcquired = false;
	_dbg_assert_(!skipSwap);

	VkSwapchainKHR swapchain = vulkan->GetSwapchain();
	VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
	present.swapchainCount = 1;
	present.pSwapchains = &swapchain;
	present.pImageIndices = &curSwapchainImage;
	present.pWaitSemaphores = &renderingCompleteSemaphore;
	present.waitSemaphoreCount = 1;

	// Can't move these into the if below - they have to stay alive until the vkQueuePresentKHR call.
	VkPresentIdKHR presentID{ VK_STRUCTURE_TYPE_PRESENT_ID_KHR };
	VkPresentTimesInfoGOOGLE presentGOOGLE{ VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE };

	uint64_t frameId = this->frameId;
	VkPresentTimeGOOGLE presentTimeGOOGLE{ (uint32_t)frameId, 0 };  // It's ok to truncate this - it'll wrap around and still work (if we ever reach 4 billion frames..)

	if (shared.measurePresentTime) {
		if (vulkan->Extensions().KHR_present_id && vulkan->GetDeviceFeatures().enabled.presentId.presentId) {
			ChainStruct(present, &presentID);
			presentID.pPresentIds = &frameId;
			presentID.swapchainCount = 1;
		} else if (vulkan->Extensions().GOOGLE_display_timing) {
			ChainStruct(present, &presentGOOGLE);
			presentGOOGLE.pTimes = &presentTimeGOOGLE;
			presentGOOGLE.swapchainCount = 1;
		}
	}

	return vkQueuePresentKHR(vulkan->GetGraphicsQueue(), &present);
}

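// Returns the init command buffer, lazily resetting its pool and beginning recording the
// first time it's requested each frame. When GPU profiling is enabled, this is also where
// the query pool gets reset and the frame's first timestamp is written.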
VkCommandBuffer FrameData::GetInitCmd(VulkanContext *vulkan) {
	if (!hasInitCommands) {
		VkCommandBufferBeginInfo begin = {
			VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
			nullptr,
			VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
		};
		vkResetCommandPool(vulkan->GetDevice(), cmdPoolInit, 0);
		VkResult res = vkBeginCommandBuffer(initCmd, &begin);
		if (res != VK_SUCCESS) {
			return VK_NULL_HANDLE;
		}

		// Good spot to reset the query pool.
		if (profile.enabled) {
			vkCmdResetQueryPool(initCmd, profile.queryPool, 0, MAX_TIMESTAMP_QUERIES);
			vkCmdWriteTimestamp(initCmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, profile.queryPool, 0);
		}

		hasInitCommands = true;
	}
	return initCmd;
}

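// Ends whichever of the init/main/present command buffers were recorded and submits them to
// the graphics queue in that order. FinishFrame submits signal the frame fence and, unless
// skipSwap is set, wait on acquireSemaphore and signal renderingCompleteSemaphore. Sync
// submits signal the shared readback fence and then stall until the GPU is done, so the
// readback contents are valid on the CPU.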
void FrameData::Submit(VulkanContext *vulkan, FrameSubmitType type, FrameDataShared &sharedData) {
	VkCommandBuffer cmdBufs[3];
	int numCmdBufs = 0;

	VkFence fenceToTrigger = VK_NULL_HANDLE;

	if (hasInitCommands) {
		if (profile.enabled) {
			// Pre-allocated query ID 1 - end of init cmdbuf.
			vkCmdWriteTimestamp(initCmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, profile.queryPool, 1);
		}

		VkResult res = vkEndCommandBuffer(initCmd);
		cmdBufs[numCmdBufs++] = initCmd;

		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (init)! result=%s", VulkanResultToString(res));
		hasInitCommands = false;
	}

	if ((hasMainCommands || hasPresentCommands) && type == FrameSubmitType::Sync) {
		fenceToTrigger = sharedData.readbackFence;
	}

	if (hasMainCommands) {
		VkResult res = vkEndCommandBuffer(mainCmd);
		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (main)! result=%s", VulkanResultToString(res));

		cmdBufs[numCmdBufs++] = mainCmd;
		hasMainCommands = false;
	}

	if (hasPresentCommands) {
		_dbg_assert_(type != FrameSubmitType::Pending);
		VkResult res = vkEndCommandBuffer(presentCmd);

		_assert_msg_(res == VK_SUCCESS, "vkEndCommandBuffer failed (present)! result=%s", VulkanResultToString(res));

		cmdBufs[numCmdBufs++] = presentCmd;
		hasPresentCommands = false;
	}

	if (type == FrameSubmitType::FinishFrame) {
		_dbg_assert_(!fenceToTrigger);
		fenceToTrigger = fence;
	}

	if (!numCmdBufs && fenceToTrigger == VK_NULL_HANDLE) {
		// Nothing to do.
		return;
	}

	VkSubmitInfo submit_info{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
	VkPipelineStageFlags waitStage[1]{ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
	if (type == FrameSubmitType::FinishFrame && !skipSwap) {
		_dbg_assert_(hasAcquired);
		submit_info.waitSemaphoreCount = 1;
		submit_info.pWaitSemaphores = &acquireSemaphore;
		submit_info.pWaitDstStageMask = waitStage;
	}
	submit_info.commandBufferCount = (uint32_t)numCmdBufs;
	submit_info.pCommandBuffers = cmdBufs;
	if (type == FrameSubmitType::FinishFrame && !skipSwap) {
		submit_info.signalSemaphoreCount = 1;
		submit_info.pSignalSemaphores = &renderingCompleteSemaphore;
	}

	VkResult res;
	if (fenceToTrigger == fence) {
		VLOG("Doing queue submit, fencing frame %d", this->index);
		// The frame fence is waited on by the main thread - the two threads are not allowed to access it
		// simultaneously, hence the readyForFence handshake below.
		res = vkQueueSubmit(vulkan->GetGraphicsQueue(), 1, &submit_info, fenceToTrigger);
		if (sharedData.useMultiThreading) {
			std::lock_guard<std::mutex> lock(fenceMutex);
			readyForFence = true;
			fenceCondVar.notify_one();
		}
	} else {
		VLOG("Doing queue submit, fencing something (%p)", fenceToTrigger);
		res = vkQueueSubmit(vulkan->GetGraphicsQueue(), 1, &submit_info, fenceToTrigger);
	}

	if (res == VK_ERROR_DEVICE_LOST) {
		_assert_msg_(false, "Lost the Vulkan device in vkQueueSubmit! If this happens again, switch Graphics Backend away from Vulkan");
	} else {
		_assert_msg_(res == VK_SUCCESS, "vkQueueSubmit failed (main)! result=%s", VulkanResultToString(res));
	}

	if (type == FrameSubmitType::Sync) {
		// Hard stall of the GPU, not ideal, but necessary so the CPU has the contents of the readback.
		vkWaitForFences(vulkan->GetDevice(), 1, &sharedData.readbackFence, true, UINT64_MAX);
		vkResetFences(vulkan->GetDevice(), 1, &sharedData.readbackFence);
		syncDone = true;
	}
}

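// Initializes state shared between all frames: the readback fence and the flags controlling
// multithreading and present-time measurement.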
void FrameDataShared::Init(VulkanContext *vulkan, bool useMultiThreading, bool measurePresentTime) {
	// This fence is used for synchronizing readbacks. Does not need preinitialization.
	readbackFence = vulkan->CreateFence(false);
	vulkan->SetDebugName(readbackFence, VK_OBJECT_TYPE_FENCE, "readbackFence");

	this->useMultiThreading = useMultiThreading;
	this->measurePresentTime = measurePresentTime;
}

void FrameDataShared::Destroy(VulkanContext *vulkan) {
	VkDevice device = vulkan->GetDevice();
	vkDestroyFence(device, readbackFence, nullptr);
}
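// Rough per-frame flow, as a sketch only - the real call sites live elsewhere in the renderer,
// and threading, sync readbacks and swapchain recreation are omitted:
//
//   frameData.AcquireNextImage(vulkan);
//   VkCommandBuffer initCmd = frameData.GetInitCmd(vulkan);  // record uploads etc. here
//   // ... record the main/present command buffers ...
//   frameData.Submit(vulkan, FrameSubmitType::FinishFrame, frameDataShared);
//   VkResult res = frameData.QueuePresent(vulkan, frameDataShared);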