1
// Copyright (c) 2013- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <algorithm>
#include <cfloat>

#include "Common/Data/Convert/ColorConv.h"
#include "Common/Profiler/Profiler.h"
#include "Common/LogReporting.h"
#include "Common/Math/SIMDHeaders.h"
#include "Common/Math/CrossSIMD.h"
#include "Common/Math/lin/matrix4x4.h"
#include "Common/TimeUtil.h"
#include "Core/System.h"
#include "Core/Config.h"
#include "GPU/GPUCommon.h"
#include "GPU/Common/DrawEngineCommon.h"
#include "GPU/Common/SplineCommon.h"
#include "GPU/Common/DepthRaster.h"
#include "GPU/Common/VertexDecoderCommon.h"
#include "GPU/Common/SoftwareTransformCommon.h"
#include "GPU/ge_constants.h"
#include "GPU/GPUState.h"

enum {
40
TRANSFORMED_VERTEX_BUFFER_SIZE = VERTEX_BUFFER_MAX * sizeof(TransformedVertex),
41
};
42
43
// Allocates the large decode/transform scratch buffers and (optionally) the
// vertex decoder JIT cache, then sets up index generation and depth rastering.
DrawEngineCommon::DrawEngineCommon() : decoderMap_(32) {
	// The vertex decoder JIT is only enabled when a JIT CPU core is in use.
	if (g_Config.bVertexDecoderJit && (g_Config.iCpuCore == (int)CPUCore::JIT || g_Config.iCpuCore == (int)CPUCore::JIT_IR)) {
		decJitCache_ = new VertexDecoderJitCache();
	}
	transformed_ = (TransformedVertex *)AllocateMemoryPages(TRANSFORMED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	// Expanded buffer is 3x: rectangles/lines can expand into up to two triangles each.
	transformedExpanded_ = (TransformedVertex *)AllocateMemoryPages(3 * TRANSFORMED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	decoded_ = (u8 *)AllocateMemoryPages(DECODED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	decIndex_ = (u16 *)AllocateMemoryPages(DECODED_INDEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	indexGen.Setup(decIndex_);

	InitDepthRaster();
}

// Releases all scratch buffers, the JIT cache, every cached vertex decoder,
// and the spline/bezier weight caches.
DrawEngineCommon::~DrawEngineCommon() {
	FreeMemoryPages(decoded_, DECODED_VERTEX_BUFFER_SIZE);
	FreeMemoryPages(decIndex_, DECODED_INDEX_BUFFER_SIZE);
	FreeMemoryPages(transformed_, TRANSFORMED_VERTEX_BUFFER_SIZE);
	FreeMemoryPages(transformedExpanded_, 3 * TRANSFORMED_VERTEX_BUFFER_SIZE);
	ShutdownDepthRaster();
	delete decJitCache_;
	decoderMap_.Iterate([&](const uint32_t vtype, VertexDecoder *decoder) {
		delete decoder;
	});
	ClearSplineBezierWeights();
}

void DrawEngineCommon::Init() {
70
NotifyConfigChanged();
71
}
72
73
std::vector<std::string> DrawEngineCommon::DebugGetVertexLoaderIDs() {
74
std::vector<std::string> ids;
75
decoderMap_.Iterate([&](const uint32_t vtype, VertexDecoder *decoder) {
76
std::string id;
77
id.resize(sizeof(vtype));
78
memcpy(&id[0], &vtype, sizeof(vtype));
79
ids.push_back(id);
80
});
81
return ids;
82
}
83
84
std::string DrawEngineCommon::DebugGetVertexLoaderString(std::string_view id, DebugShaderStringType stringType) {
85
if (id.size() < sizeof(u32)) {
86
return "N/A";
87
}
88
u32 mapId;
89
memcpy(&mapId, &id[0], sizeof(mapId));
90
VertexDecoder *dec;
91
if (decoderMap_.Get(mapId, &dec)) {
92
return dec->GetString(stringType);
93
} else {
94
return "N/A";
95
}
96
}
97
98
void DrawEngineCommon::NotifyConfigChanged() {
99
if (decJitCache_)
100
decJitCache_->Clear();
101
lastVType_ = -1;
102
dec_ = nullptr;
103
decoderMap_.Iterate([&](const uint32_t vtype, VertexDecoder *decoder) {
104
delete decoder;
105
});
106
decoderMap_.Clear();
107
108
useHWTransform_ = g_Config.bHardwareTransform;
109
useHWTessellation_ = UpdateUseHWTessellation(g_Config.bHardwareTessellation);
110
}
111
112
// Submits "immediate mode" (pretransformed) vertices by converting them into
// a temporary through-mode draw. Temporarily forces GE_VTYPE_THROUGH in the
// live vertType register (and dirties dependent state) when not already in
// through mode, restoring it afterwards.
void DrawEngineCommon::DispatchSubmitImm(GEPrimitiveType prim, TransformedVertex *buffer, int vertexCount, int cullMode, bool continuation) {
	// Instead of plumbing through properly (we'd need to inject these pretransformed vertices in the middle
	// of SoftwareTransform(), which would take a lot of refactoring), we'll cheat and just turn these into
	// through vertices.
	// Since the only known use is Thrillville and it only uses it to clear, we just use color and pos.
	struct ImmVertex {
		float uv[2];
		uint32_t color;
		float xyz[3];
	};
	std::vector<ImmVertex> temp;
	temp.resize(vertexCount);
	uint32_t color1Used = 0;
	for (int i = 0; i < vertexCount; i++) {
		// Since we're sending through, scale back up to w/h.
		temp[i].uv[0] = buffer[i].u * gstate.getTextureWidth(0);
		temp[i].uv[1] = buffer[i].v * gstate.getTextureHeight(0);
		temp[i].color = buffer[i].color0_32;
		temp[i].xyz[0] = buffer[i].pos[0];
		temp[i].xyz[1] = buffer[i].pos[1];
		temp[i].xyz[2] = buffer[i].pos[2];
		color1Used |= buffer[i].color1_32;
	}
	int vtype = GE_VTYPE_TC_FLOAT | GE_VTYPE_POS_FLOAT | GE_VTYPE_COL_8888 | GE_VTYPE_THROUGH;
	// TODO: Handle fog and secondary color somehow?

	if (gstate.isFogEnabled() && !gstate.isModeThrough()) {
		WARN_LOG_REPORT_ONCE(geimmfog, Log::G3D, "Imm vertex used fog");
	}
	if (color1Used != 0 && gstate.isUsingSecondaryColor() && !gstate.isModeThrough()) {
		WARN_LOG_REPORT_ONCE(geimmcolor1, Log::G3D, "Imm vertex used secondary color");
	}

	bool prevThrough = gstate.isModeThrough();
	// Code checks this reg directly, not just the vtype ID.
	if (!prevThrough) {
		gstate.vertType |= GE_VTYPE_THROUGH;
		gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_RASTER_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_CULLRANGE);
	}

	int bytesRead;
	uint32_t vertTypeID = GetVertTypeID(vtype, 0, applySkinInDecode_);

	bool clockwise = !gstate.isCullEnabled() || gstate.getCullMode() == cullMode;
	VertexDecoder *dec = GetVertexDecoder(vertTypeID);
	SubmitPrim(&temp[0], nullptr, prim, vertexCount, dec, vertTypeID, clockwise, &bytesRead);
	Flush();

	if (!prevThrough) {
		gstate.vertType &= ~GE_VTYPE_THROUGH;
		gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_RASTER_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_CULLRANGE);
	}
}

// Gated by DIRTY_CULL_PLANES
167
void DrawEngineCommon::UpdatePlanes() {
168
float view[16];
169
float viewproj[16];
170
ConvertMatrix4x3To4x4(view, gstate.viewMatrix);
171
Matrix4ByMatrix4(viewproj, view, gstate.projMatrix);
172
173
// Next, we need to apply viewport, scissor, region, and even offset - but only for X/Y.
174
// Note that the PSP does not clip against the viewport.
175
const Vec2f baseOffset = Vec2f(gstate.getOffsetX(), gstate.getOffsetY());
176
// Region1 (rate) is used as an X1/Y1 here, matching PSP behavior.
177
minOffset_ = baseOffset + Vec2f(std::max(gstate.getRegionRateX() - 0x100, gstate.getScissorX1()), std::max(gstate.getRegionRateY() - 0x100, gstate.getScissorY1())) - Vec2f(1.0f, 1.0f);
178
maxOffset_ = baseOffset + Vec2f(std::min(gstate.getRegionX2(), gstate.getScissorX2()), std::min(gstate.getRegionY2(), gstate.getScissorY2())) + Vec2f(1.0f, 1.0f);
179
180
// Let's not handle these special cases in the fast culler.
181
offsetOutsideEdge_ = maxOffset_.x >= 4096.0f || minOffset_.x < 1.0f || minOffset_.y < 1.0f || maxOffset_.y >= 4096.0f;
182
183
// Now let's apply the viewport to our scissor/region + offset range.
184
Vec2f inverseViewportScale = Vec2f(1.0f / gstate.getViewportXScale(), 1.0f / gstate.getViewportYScale());
185
Vec2f minViewport = (minOffset_ - Vec2f(gstate.getViewportXCenter(), gstate.getViewportYCenter())) * inverseViewportScale;
186
Vec2f maxViewport = (maxOffset_ - Vec2f(gstate.getViewportXCenter(), gstate.getViewportYCenter())) * inverseViewportScale;
187
188
Vec2f viewportInvSize = Vec2f(1.0f / (maxViewport.x - minViewport.x), 1.0f / (maxViewport.y - minViewport.y));
189
190
Lin::Matrix4x4 applyViewport{};
191
// Scale to the viewport's size.
192
applyViewport.xx = 2.0f * viewportInvSize.x;
193
applyViewport.yy = 2.0f * viewportInvSize.y;
194
applyViewport.zz = 1.0f;
195
applyViewport.ww = 1.0f;
196
// And offset to the viewport's centers.
197
applyViewport.wx = -(maxViewport.x + minViewport.x) * viewportInvSize.x;
198
applyViewport.wy = -(maxViewport.y + minViewport.y) * viewportInvSize.y;
199
200
float mtx[16];
201
Matrix4ByMatrix4(mtx, viewproj, applyViewport.m);
202
// I'm sure there's some fairly optimized way to set these. If we make a version of Matrix4ByMatrix4
203
// that returns a transpose, it looks like these will be more straightforward.
204
planes_.Set(0, mtx[3] - mtx[0], mtx[7] - mtx[4], mtx[11] - mtx[8], mtx[15] - mtx[12]); // Right
205
planes_.Set(1, mtx[3] + mtx[0], mtx[7] + mtx[4], mtx[11] + mtx[8], mtx[15] + mtx[12]); // Left
206
planes_.Set(2, mtx[3] + mtx[1], mtx[7] + mtx[5], mtx[11] + mtx[9], mtx[15] + mtx[13]); // Bottom
207
planes_.Set(3, mtx[3] - mtx[1], mtx[7] - mtx[5], mtx[11] - mtx[9], mtx[15] - mtx[13]); // Top
208
planes_.Set(4, mtx[3] + mtx[2], mtx[7] + mtx[6], mtx[11] + mtx[10], mtx[15] + mtx[14]); // Near
209
planes_.Set(5, mtx[3] - mtx[2], mtx[7] - mtx[6], mtx[11] - mtx[10], mtx[15] - mtx[14]); // Far
210
}
211
212
// This code has plenty of potential for optimization.
213
//
214
// It does the simplest and safest test possible: If all points of a bbox is outside a single of
215
// our clipping planes, we reject the box. Tighter bounds would be desirable but would take more calculations.
216
// The name is a slight misnomer, because any bounding shape will work, not just boxes.
217
//
218
// Potential optimizations:
219
// * SIMD-ify the plane culling, and also the vertex data conversion (could even group together xxxxyyyyzzzz for example)
220
// * Compute min/max of the verts, and then compute a bounding sphere and check that against the planes.
221
// - Less accurate, but..
222
// - Only requires six plane evaluations then.
223
bool DrawEngineCommon::TestBoundingBox(const void *vdata, const void *inds, int vertexCount, const VertexDecoder *dec, u32 vertType) {
224
// Grab temp buffer space from large offsets in decoded_. Not exactly safe for large draws.
225
if (vertexCount > 1024) {
226
return true;
227
}
228
229
SimpleVertex *corners = (SimpleVertex *)(decoded_ + 65536 * 12);
230
float *verts = (float *)(decoded_ + 65536 * 18);
231
232
// Although this may lead to drawing that shouldn't happen, the viewport is more complex on VR.
233
// Let's always say objects are within bounds.
234
if (gstate_c.Use(GPU_USE_VIRTUAL_REALITY))
235
return true;
236
237
// Due to world matrix updates per "thing", this isn't quite as effective as it could be if we did world transform
238
// in here as well. Though, it still does cut down on a lot of updates in Tekken 6.
239
if (gstate_c.IsDirty(DIRTY_CULL_PLANES)) {
240
UpdatePlanes();
241
gpuStats.numPlaneUpdates++;
242
gstate_c.Clean(DIRTY_CULL_PLANES);
243
}
244
245
// Try to skip NormalizeVertices if it's pure positions. No need to bother with a vertex decoder
246
// and a large vertex format.
247
if ((vertType & 0xFFFFFF) == GE_VTYPE_POS_FLOAT && !inds) {
248
memcpy(verts, vdata, sizeof(float) * 3 * vertexCount);
249
} else if ((vertType & 0xFFFFFF) == GE_VTYPE_POS_8BIT && !inds) {
250
const s8 *vtx = (const s8 *)vdata;
251
for (int i = 0; i < vertexCount * 3; i++) {
252
verts[i] = vtx[i] * (1.0f / 128.0f);
253
}
254
} else if ((vertType & 0xFFFFFF) == GE_VTYPE_POS_16BIT && !inds) {
255
const s16 *vtx = (const s16 *)vdata;
256
for (int i = 0; i < vertexCount * 3; i++) {
257
verts[i] = vtx[i] * (1.0f / 32768.0f);
258
}
259
} else {
260
// Simplify away indices, bones, and morph before proceeding.
261
u8 *temp_buffer = decoded_ + 65536 * 24;
262
263
if ((inds || (vertType & (GE_VTYPE_WEIGHT_MASK | GE_VTYPE_MORPHCOUNT_MASK)))) {
264
u16 indexLowerBound = 0;
265
u16 indexUpperBound = (u16)vertexCount - 1;
266
267
if (vertexCount > 0 && inds) {
268
GetIndexBounds(inds, vertexCount, vertType, &indexLowerBound, &indexUpperBound);
269
}
270
// TODO: Avoid normalization if just plain skinning.
271
// Force software skinning.
272
const u32 vertTypeID = GetVertTypeID(vertType, gstate.getUVGenMode(), true);
273
::NormalizeVertices(corners, temp_buffer, (const u8 *)vdata, indexLowerBound, indexUpperBound, dec, vertType);
274
IndexConverter conv(vertType, inds);
275
for (int i = 0; i < vertexCount; i++) {
276
verts[i * 3] = corners[conv(i)].pos.x;
277
verts[i * 3 + 1] = corners[conv(i)].pos.y;
278
verts[i * 3 + 2] = corners[conv(i)].pos.z;
279
}
280
} else {
281
// Simple, most common case.
282
int stride = dec->VertexSize();
283
int offset = dec->posoff;
284
switch (vertType & GE_VTYPE_POS_MASK) {
285
case GE_VTYPE_POS_8BIT:
286
for (int i = 0; i < vertexCount; i++) {
287
const s8 *data = (const s8 *)vdata + i * stride + offset;
288
for (int j = 0; j < 3; j++) {
289
verts[i * 3 + j] = data[j] * (1.0f / 128.0f);
290
}
291
}
292
break;
293
case GE_VTYPE_POS_16BIT:
294
for (int i = 0; i < vertexCount; i++) {
295
const s16 *data = ((const s16 *)((const s8 *)vdata + i * stride + offset));
296
for (int j = 0; j < 3; j++) {
297
verts[i * 3 + j] = data[j] * (1.0f / 32768.0f);
298
}
299
}
300
break;
301
case GE_VTYPE_POS_FLOAT:
302
for (int i = 0; i < vertexCount; i++)
303
memcpy(&verts[i * 3], (const u8 *)vdata + stride * i + offset, sizeof(float) * 3);
304
break;
305
}
306
}
307
}
308
309
// Pretransform the verts in-place so we don't have to do it inside the loop.
310
// We do this differently in the fast version below since we skip the max/minOffset checks there
311
// making it easier to get the whole thing ready for SIMD.
312
for (int i = 0; i < vertexCount; i++) {
313
float worldpos[3];
314
Vec3ByMatrix43(worldpos, &verts[i * 3], gstate.worldMatrix);
315
memcpy(&verts[i * 3], worldpos, 12);
316
}
317
318
// Note: near/far are not checked without clamp/clip enabled, so we skip those planes.
319
int totalPlanes = gstate.isDepthClampEnabled() ? 6 : 4;
320
for (int plane = 0; plane < totalPlanes; plane++) {
321
int inside = 0;
322
int out = 0;
323
for (int i = 0; i < vertexCount; i++) {
324
// Test against the frustum planes, and count.
325
// TODO: We should test 4 vertices at a time using SIMD.
326
// I guess could also test one vertex against 4 planes at a time, though a lot of waste at the common case of 6.
327
const float *worldpos = verts + i * 3;
328
float value = planes_.Test(plane, worldpos);
329
if (value <= -FLT_EPSILON) // Not sure why we use exactly this value. Probably '< 0' would do.
330
out++;
331
else
332
inside++;
333
}
334
335
// No vertices inside this one plane? Don't need to draw.
336
if (inside == 0) {
337
// All out - but check for X and Y if the offset was near the cullbox edge.
338
bool outsideEdge = false;
339
switch (plane) {
340
case 0: outsideEdge = maxOffset_.x >= 4096.0f; break;
341
case 1: outsideEdge = minOffset_.x < 1.0f; break;
342
case 2: outsideEdge = minOffset_.y < 1.0f; break;
343
case 3: outsideEdge = maxOffset_.y >= 4096.0f; break;
344
}
345
346
// Only consider this outside if offset + scissor/region is fully inside the cullbox.
347
if (!outsideEdge)
348
return false;
349
}
350
351
// Any out. For testing that the planes are in the right locations.
352
// if (out != 0) return false;
353
}
354
return true;
355
}
356
357
// NOTE: This doesn't handle through-mode, indexing, morph, or skinning.
358
// TODO: For high vertex counts, we should just take the min/max of all the verts, and test the resulting six cube
359
// corners. That way we can cull more draws quite cheaply.
360
// We could take the min/max during the regular vertex decode, and just skip the draw call if it's trivially culled.
361
// This would help games like Midnight Club (that one does a lot of out-of-bounds drawing) immensely.
362
bool DrawEngineCommon::TestBoundingBoxFast(const void *vdata, int vertexCount, const VertexDecoder *dec, u32 vertType) {
363
SimpleVertex *corners = (SimpleVertex *)(decoded_ + 65536 * 12);
364
float *verts = (float *)(decoded_ + 65536 * 18);
365
366
// Although this may lead to drawing that shouldn't happen, the viewport is more complex on VR.
367
// Let's always say objects are within bounds.
368
if (gstate_c.Use(GPU_USE_VIRTUAL_REALITY))
369
return true;
370
371
// Due to world matrix updates per "thing", this isn't quite as effective as it could be if we did world transform
372
// in here as well. Though, it still does cut down on a lot of updates in Tekken 6.
373
if (gstate_c.IsDirty(DIRTY_CULL_PLANES)) {
374
UpdatePlanes();
375
gpuStats.numPlaneUpdates++;
376
gstate_c.Clean(DIRTY_CULL_PLANES);
377
}
378
379
// Also let's just bail if offsetOutsideEdge_ is set, instead of handling the cases.
380
// NOTE: This is written to in UpdatePlanes so can't check it before.
381
if (offsetOutsideEdge_)
382
return true;
383
384
// Simple, most common case.
385
int stride = dec->VertexSize();
386
int offset = dec->posoff;
387
int vertStride = 3;
388
389
// TODO: Possibly do the plane tests directly against the source formats instead of converting.
390
switch (vertType & GE_VTYPE_POS_MASK) {
391
case GE_VTYPE_POS_8BIT:
392
for (int i = 0; i < vertexCount; i++) {
393
const s8 *data = (const s8 *)vdata + i * stride + offset;
394
for (int j = 0; j < 3; j++) {
395
verts[i * 3 + j] = data[j] * (1.0f / 128.0f);
396
}
397
}
398
break;
399
case GE_VTYPE_POS_16BIT:
400
{
401
#if PPSSPP_ARCH(SSE2)
402
__m128 scaleFactor = _mm_set1_ps(1.0f / 32768.0f);
403
for (int i = 0; i < vertexCount; i++) {
404
const s16 *data = ((const s16 *)((const s8 *)vdata + i * stride + offset));
405
__m128i bits = _mm_loadl_epi64((const __m128i*)data);
406
// Sign extension. Hacky without SSE4.
407
bits = _mm_srai_epi32(_mm_unpacklo_epi16(bits, bits), 16);
408
__m128 pos = _mm_mul_ps(_mm_cvtepi32_ps(bits), scaleFactor);
409
_mm_storeu_ps(verts + i * 3, pos); // TODO: use stride 4 to avoid clashing writes?
410
}
411
#elif PPSSPP_ARCH(ARM_NEON)
412
for (int i = 0; i < vertexCount; i++) {
413
const s16 *dataPtr = ((const s16 *)((const s8 *)vdata + i * stride + offset));
414
int32x4_t data = vmovl_s16(vld1_s16(dataPtr));
415
float32x4_t pos = vcvtq_n_f32_s32(data, 15); // >> 15 = division by 32768.0f
416
vst1q_f32(verts + i * 3, pos);
417
}
418
#else
419
for (int i = 0; i < vertexCount; i++) {
420
const s16 *data = ((const s16 *)((const s8 *)vdata + i * stride + offset));
421
for (int j = 0; j < 3; j++) {
422
verts[i * 3 + j] = data[j] * (1.0f / 32768.0f);
423
}
424
}
425
#endif
426
break;
427
}
428
case GE_VTYPE_POS_FLOAT:
429
// No need to copy in this case, we can just read directly from the source format with a stride.
430
verts = (float *)((uint8_t *)vdata + offset);
431
vertStride = stride / 4;
432
break;
433
}
434
435
// We only check the 4 sides. Near/far won't likely make a huge difference.
436
// We test one vertex against 4 planes to get some SIMD. Vertices need to be transformed to world space
437
// for testing, don't want to re-do that, so we have to use that "pivot" of the data.
438
#if PPSSPP_ARCH(SSE2)
439
const __m128 worldX = _mm_loadu_ps(gstate.worldMatrix);
440
const __m128 worldY = _mm_loadu_ps(gstate.worldMatrix + 3);
441
const __m128 worldZ = _mm_loadu_ps(gstate.worldMatrix + 6);
442
const __m128 worldW = _mm_loadu_ps(gstate.worldMatrix + 9);
443
const __m128 planeX = _mm_loadu_ps(planes_.x);
444
const __m128 planeY = _mm_loadu_ps(planes_.y);
445
const __m128 planeZ = _mm_loadu_ps(planes_.z);
446
const __m128 planeW = _mm_loadu_ps(planes_.w);
447
__m128 inside = _mm_set1_ps(0.0f);
448
for (int i = 0; i < vertexCount; i++) {
449
const float *pos = verts + i * vertStride;
450
__m128 worldpos = _mm_add_ps(
451
_mm_add_ps(
452
_mm_mul_ps(worldX, _mm_set1_ps(pos[0])),
453
_mm_mul_ps(worldY, _mm_set1_ps(pos[1]))
454
),
455
_mm_add_ps(
456
_mm_mul_ps(worldZ, _mm_set1_ps(pos[2])),
457
worldW
458
)
459
);
460
// OK, now we check it against the four planes.
461
// This is really curiously similar to a matrix multiplication (well, it is one).
462
__m128 posX = _mm_shuffle_ps(worldpos, worldpos, _MM_SHUFFLE(0, 0, 0, 0));
463
__m128 posY = _mm_shuffle_ps(worldpos, worldpos, _MM_SHUFFLE(1, 1, 1, 1));
464
__m128 posZ = _mm_shuffle_ps(worldpos, worldpos, _MM_SHUFFLE(2, 2, 2, 2));
465
__m128 planeDist = _mm_add_ps(
466
_mm_add_ps(
467
_mm_mul_ps(planeX, posX),
468
_mm_mul_ps(planeY, posY)
469
),
470
_mm_add_ps(
471
_mm_mul_ps(planeZ, posZ),
472
planeW
473
)
474
);
475
inside = _mm_or_ps(inside, _mm_cmpge_ps(planeDist, _mm_setzero_ps()));
476
}
477
// 0xF means that we found at least one vertex inside every one of the planes.
478
// We don't bother with counts, though it wouldn't be hard if we had a use for them.
479
return _mm_movemask_ps(inside) == 0xF;
480
#elif PPSSPP_ARCH(ARM_NEON)
481
const float32x4_t worldX = vld1q_f32(gstate.worldMatrix);
482
const float32x4_t worldY = vld1q_f32(gstate.worldMatrix + 3);
483
const float32x4_t worldZ = vld1q_f32(gstate.worldMatrix + 6);
484
const float32x4_t worldW = vld1q_f32(gstate.worldMatrix + 9);
485
const float32x4_t planeX = vld1q_f32(planes_.x);
486
const float32x4_t planeY = vld1q_f32(planes_.y);
487
const float32x4_t planeZ = vld1q_f32(planes_.z);
488
const float32x4_t planeW = vld1q_f32(planes_.w);
489
uint32x4_t inside = vdupq_n_u32(0);
490
for (int i = 0; i < vertexCount; i++) {
491
const float *pos = verts + i * vertStride;
492
float32x4_t objpos = vld1q_f32(pos);
493
float32x4_t worldpos = vaddq_f32(
494
vmlaq_laneq_f32(
495
vmulq_laneq_f32(worldX, objpos, 0),
496
worldY, objpos, 1),
497
vmlaq_laneq_f32(worldW, worldZ, objpos, 2)
498
);
499
// OK, now we check it against the four planes.
500
// This is really curiously similar to a matrix multiplication (well, it is one).
501
float32x4_t planeDist = vaddq_f32(
502
vmlaq_laneq_f32(
503
vmulq_laneq_f32(planeX, worldpos, 0),
504
planeY, worldpos, 1),
505
vmlaq_laneq_f32(planeW, planeZ, worldpos, 2)
506
);
507
inside = vorrq_u32(inside, vcgezq_f32(planeDist));
508
}
509
uint64_t insideBits = vget_lane_u64(vreinterpret_u64_u16(vmovn_u32(inside)), 0);
510
return ~insideBits == 0; // InsideBits all ones means that we found at least one vertex inside every one of the planes. We don't bother with counts, though it wouldn't be hard.
511
#else
512
int inside[4]{};
513
for (int i = 0; i < vertexCount; i++) {
514
const float *pos = verts + i * vertStride;
515
float worldpos[3];
516
Vec3ByMatrix43(worldpos, pos, gstate.worldMatrix);
517
for (int plane = 0; plane < 4; plane++) {
518
float value = planes_.Test(plane, worldpos);
519
if (value >= 0.0f)
520
inside[plane]++;
521
}
522
}
523
524
for (int plane = 0; plane < 4; plane++) {
525
if (inside[plane] == 0) {
526
return false;
527
}
528
}
529
#endif
530
return true;
531
}
532
533
// 2D bounding box test against scissor. No indexing yet.
534
// Only supports non-indexed draws with float positions.
535
bool DrawEngineCommon::TestBoundingBoxThrough(const void *vdata, int vertexCount, const VertexDecoder *dec, u32 vertType, int *bytesRead) {
536
// Grab temp buffer space from large offsets in decoded_. Not exactly safe for large draws.
537
if (vertexCount > 16) {
538
return true;
539
}
540
541
// Although this may lead to drawing that shouldn't happen, the viewport is more complex on VR.
542
// Let's always say objects are within bounds.
543
if (gstate_c.Use(GPU_USE_VIRTUAL_REALITY))
544
return true;
545
546
const int stride = dec->VertexSize();
547
const int posOffset = dec->posoff;
548
549
*bytesRead = stride * vertexCount;
550
551
bool allOutsideLeft = true;
552
bool allOutsideTop = true;
553
bool allOutsideRight = true;
554
bool allOutsideBottom = true;
555
const float left = gstate.getScissorX1();
556
const float top = gstate.getScissorY1();
557
const float right = gstate.getScissorX2();
558
const float bottom = gstate.getScissorY2();
559
560
switch (vertType & GE_VTYPE_POS_MASK) {
561
case GE_VTYPE_POS_FLOAT:
562
{
563
// TODO: This can be SIMD'd, with some trickery.
564
for (int i = 0; i < vertexCount; i++) {
565
const float *pos = (const float*)((const u8 *)vdata + stride * i + posOffset);
566
const float x = pos[0];
567
const float y = pos[1];
568
if (x >= left) {
569
allOutsideLeft = false;
570
}
571
if (x <= right + 1) {
572
allOutsideRight = false;
573
}
574
if (y >= top) {
575
allOutsideTop = false;
576
}
577
if (y <= bottom + 1) {
578
allOutsideBottom = false;
579
}
580
}
581
if (allOutsideLeft || allOutsideTop || allOutsideRight || allOutsideBottom) {
582
return false;
583
}
584
return true;
585
}
586
default:
587
// Shouldn't end up here with the checks outside this function.
588
_dbg_assert_(false);
589
return true;
590
}
591
}
592
593
// Chooses how shader blending will read the framebuffer: direct framebuffer
// fetch when the GPU supports it, otherwise a copy bound as a texture.
void DrawEngineCommon::ApplyFramebufferRead(FBOTexState *fboTexState) {
	if (gstate_c.Use(GPU_USE_FRAMEBUFFER_FETCH)) {
		*fboTexState = FBO_TEX_READ_FRAMEBUFFER;
	} else {
		gpuStats.numCopiesForShaderBlend++;
		*fboTexState = FBO_TEX_COPY_BIND_TEX;
	}
	gstate_c.Dirty(DIRTY_SHADERBLEND);
}

int DrawEngineCommon::ComputeNumVertsToDecode() const {
604
int sum = 0;
605
for (int i = 0; i < numDrawVerts_; i++) {
606
sum += drawVerts_[i].indexUpperBound + 1 - drawVerts_[i].indexLowerBound;
607
}
608
return sum;
609
}
610
611
// Takes a list of consecutive PRIM opcodes, and extends the current draw call to include them.
612
// This is just a performance optimization.
613
int DrawEngineCommon::ExtendNonIndexedPrim(const uint32_t *cmd, const uint32_t *stall, const VertexDecoder *dec, u32 vertTypeID, bool clockwise, int *bytesRead, bool isTriangle) {
614
const uint32_t *start = cmd;
615
int prevDrawVerts = numDrawVerts_ - 1;
616
DeferredVerts &dv = drawVerts_[prevDrawVerts];
617
int offset = dv.vertexCount;
618
619
_dbg_assert_(numDrawInds_ <= MAX_DEFERRED_DRAW_INDS); // if it's equal, the check below will take care of it before any action is taken.
620
_dbg_assert_(numDrawVerts_ > 0);
621
622
if (!clockwise) {
623
anyCCWOrIndexed_ = true;
624
}
625
int seenPrims = 0;
626
int numDrawInds = numDrawInds_;
627
while (cmd != stall) {
628
uint32_t data = *cmd;
629
if ((data & 0xFFF80000) != 0x04000000) {
630
break;
631
}
632
GEPrimitiveType newPrim = static_cast<GEPrimitiveType>((data >> 16) & 7);
633
if (IsTrianglePrim(newPrim) != isTriangle)
634
break;
635
int vertexCount = data & 0xFFFF;
636
if (numDrawInds >= MAX_DEFERRED_DRAW_INDS || vertexCountInDrawCalls_ + offset + vertexCount > VERTEX_BUFFER_MAX) {
637
break;
638
}
639
DeferredInds &di = drawInds_[numDrawInds++];
640
di.indexType = 0;
641
di.prim = newPrim;
642
seenPrims |= (1 << newPrim);
643
di.clockwise = clockwise;
644
di.vertexCount = vertexCount;
645
di.vertDecodeIndex = prevDrawVerts;
646
di.offset = offset;
647
offset += vertexCount;
648
cmd++;
649
}
650
numDrawInds_ = numDrawInds;
651
seenPrims_ |= seenPrims;
652
653
int totalCount = offset - dv.vertexCount;
654
dv.vertexCount = offset;
655
dv.indexUpperBound = dv.vertexCount - 1;
656
vertexCountInDrawCalls_ += totalCount;
657
*bytesRead = totalCount * dec->VertexSize();
658
return cmd - start;
659
}
660
661
// "Submits" a prim without actually drawing it - only maintains prevPrim_
// bookkeeping and reports how many bytes the vertex data would consume.
void DrawEngineCommon::SkipPrim(GEPrimitiveType prim, int vertexCount, const VertexDecoder *dec, u32 vertTypeID, int *bytesRead) {
	if (!indexGen.PrimCompatible(prevPrim_, prim)) {
		Flush();
	}

	// This isn't exactly right, if we flushed, since prims can straddle previous calls.
	// But it generally works for common usage.
	if (prim == GE_PRIM_KEEP_PREVIOUS) {
		// Has to be set to something, let's assume POINTS (0) if no previous.
		if (prevPrim_ == GE_PRIM_INVALID)
			prevPrim_ = GE_PRIM_POINTS;
		prim = prevPrim_;
	} else {
		prevPrim_ = prim;
	}

	*bytesRead = vertexCount * dec->VertexSize();
}

// vertTypeID is the vertex type but with the UVGen mode smashed into the top bits.
681
bool DrawEngineCommon::SubmitPrim(const void *verts, const void *inds, GEPrimitiveType prim, int vertexCount, const VertexDecoder *dec, u32 vertTypeID, bool clockwise, int *bytesRead) {
682
if (!indexGen.PrimCompatible(prevPrim_, prim) || numDrawVerts_ >= MAX_DEFERRED_DRAW_VERTS || numDrawInds_ >= MAX_DEFERRED_DRAW_INDS || vertexCountInDrawCalls_ + vertexCount > VERTEX_BUFFER_MAX) {
683
Flush();
684
}
685
_dbg_assert_(numDrawVerts_ < MAX_DEFERRED_DRAW_VERTS);
686
_dbg_assert_(numDrawInds_ < MAX_DEFERRED_DRAW_INDS);
687
688
// This isn't exactly right, if we flushed, since prims can straddle previous calls.
689
// But it generally works for common usage.
690
if (prim == GE_PRIM_KEEP_PREVIOUS) {
691
// Has to be set to something, let's assume POINTS (0) if no previous.
692
if (prevPrim_ == GE_PRIM_INVALID)
693
prevPrim_ = GE_PRIM_POINTS;
694
prim = prevPrim_;
695
} else {
696
prevPrim_ = prim;
697
}
698
699
// If vtype has changed, setup the vertex decoder. Don't need to nullcheck dec_ since we set lastVType_ to an invalid value whenever we null it.
700
if (vertTypeID != lastVType_) {
701
dec_ = dec;
702
_dbg_assert_(dec->VertexType() == vertTypeID);
703
lastVType_ = vertTypeID;
704
} else {
705
_dbg_assert_(dec_->VertexType() == lastVType_);
706
}
707
708
*bytesRead = vertexCount * dec_->VertexSize();
709
710
// Check that we have enough vertices to form the requested primitive.
711
if (vertexCount < 3) {
712
if ((vertexCount < 2 && prim > 0) || (prim > GE_PRIM_LINE_STRIP && prim != GE_PRIM_RECTANGLES)) {
713
return false;
714
}
715
if (vertexCount <= 0) {
716
// Unfortunately we need to do this check somewhere since GetIndexBounds doesn't handle zero-length arrays.
717
return false;
718
}
719
} else if (prim == GE_PRIM_TRIANGLES) {
720
// Make sure the vertex count is divisible by 3, round down. See issue #7503
721
const int rem = vertexCount % 3;
722
if (rem != 0) {
723
vertexCount -= rem;
724
}
725
}
726
727
bool applySkin = dec_->skinInDecode;
728
729
DeferredInds &di = drawInds_[numDrawInds_++];
730
_dbg_assert_(numDrawInds_ <= MAX_DEFERRED_DRAW_INDS);
731
732
di.inds = inds;
733
int indexType = (vertTypeID & GE_VTYPE_IDX_MASK) >> GE_VTYPE_IDX_SHIFT;
734
if (indexType) {
735
anyCCWOrIndexed_ = true;
736
}
737
di.indexType = indexType;
738
di.prim = prim;
739
di.clockwise = clockwise;
740
if (!clockwise) {
741
anyCCWOrIndexed_ = true;
742
}
743
di.vertexCount = vertexCount;
744
const int numDrawVerts = numDrawVerts_;
745
di.vertDecodeIndex = numDrawVerts;
746
di.offset = 0;
747
748
_dbg_assert_(numDrawVerts <= MAX_DEFERRED_DRAW_VERTS);
749
750
if (inds && numDrawVerts > decodeVertsCounter_ && drawVerts_[numDrawVerts - 1].verts == verts && !applySkin) {
751
// Same vertex pointer as a previous un-decoded draw call - let's just extend the decode!
752
di.vertDecodeIndex = numDrawVerts - 1;
753
u16 lb;
754
u16 ub;
755
GetIndexBounds(inds, vertexCount, vertTypeID, &lb, &ub);
756
DeferredVerts &dv = drawVerts_[numDrawVerts - 1];
757
if (lb < dv.indexLowerBound)
758
dv.indexLowerBound = lb;
759
if (ub > dv.indexUpperBound)
760
dv.indexUpperBound = ub;
761
} else {
762
// Record a new draw, and a new index gen.
763
DeferredVerts &dv = drawVerts_[numDrawVerts];
764
numDrawVerts_ = numDrawVerts + 1; // Increment the uncached variable
765
dv.verts = verts;
766
dv.vertexCount = vertexCount;
767
dv.uvScale = gstate_c.uv;
768
// Does handle the unindexed case.
769
GetIndexBounds(inds, vertexCount, vertTypeID, &dv.indexLowerBound, &dv.indexUpperBound);
770
}
771
772
vertexCountInDrawCalls_ += vertexCount;
773
seenPrims_ |= (1 << prim);
774
775
if (prim == GE_PRIM_RECTANGLES && (gstate.getTextureAddress(0) & 0x3FFFFFFF) == (gstate.getFrameBufAddress() & 0x3FFFFFFF)) {
776
// This prevents issues with consecutive self-renders in Ridge Racer.
777
gstate_c.Dirty(DIRTY_TEXTURE_PARAMS);
778
Flush();
779
}
780
return true;
781
}
782
783
void DrawEngineCommon::BeginFrame() {
784
applySkinInDecode_ = g_Config.bSoftwareSkinning;
785
}
786
787
void DrawEngineCommon::DecodeVerts(const VertexDecoder *dec, u8 *dest) {
788
const int numDrawVerts = numDrawVerts_;
789
if (!numDrawVerts) {
790
return;
791
}
792
// Note that this should be able to continue a partial decode - we don't necessarily start from zero here (although we do most of the time).
793
int i = decodeVertsCounter_;
794
const int stride = (int)dec->GetDecVtxFmt().stride;
795
int numDecodedVerts = numDecodedVerts_; // Move to a local for better codegen.
796
for (; i < numDrawVerts; i++) {
797
const DeferredVerts &dv = drawVerts_[i];
798
799
const int indexLowerBound = dv.indexLowerBound;
800
drawVertexOffsets_[i] = numDecodedVerts - indexLowerBound;
801
const int indexUpperBound = dv.indexUpperBound;
802
const int count = indexUpperBound - indexLowerBound + 1;
803
if (count + numDecodedVerts >= VERTEX_BUFFER_MAX) {
804
// Hit our limit! Stop decoding in this draw.
805
break;
806
}
807
808
// Decode the verts (and at the same time apply morphing/skinning). Simple.
809
const u8 *startPos = (const u8 *)dv.verts + indexLowerBound * dec->VertexSize();
810
dec->DecodeVerts(dest + numDecodedVerts * stride, startPos, &dv.uvScale, count);
811
numDecodedVerts += count;
812
}
813
numDecodedVerts_ = numDecodedVerts;
814
decodeVertsCounter_ = i;
815
}
816
817
int DrawEngineCommon::DecodeInds() {
818
// Note that this should be able to continue a partial decode - we don't necessarily start from zero here (although we do most of the time).
819
820
int i = decodeIndsCounter_;
821
for (; i < numDrawInds_; i++) {
822
const DeferredInds &di = drawInds_[i];
823
824
const int indexOffset = drawVertexOffsets_[di.vertDecodeIndex] + di.offset;
825
const bool clockwise = di.clockwise;
826
// We've already collapsed subsequent draws with the same vertex pointer, so no tricky logic here anymore.
827
// 2. Loop through the drawcalls, translating indices as we go.
828
switch (di.indexType) {
829
case GE_VTYPE_IDX_NONE >> GE_VTYPE_IDX_SHIFT:
830
indexGen.AddPrim(di.prim, di.vertexCount, indexOffset, clockwise);
831
break;
832
case GE_VTYPE_IDX_8BIT >> GE_VTYPE_IDX_SHIFT:
833
indexGen.TranslatePrim(di.prim, di.vertexCount, (const u8 *)di.inds, indexOffset, clockwise);
834
break;
835
case GE_VTYPE_IDX_16BIT >> GE_VTYPE_IDX_SHIFT:
836
indexGen.TranslatePrim(di.prim, di.vertexCount, (const u16_le *)di.inds, indexOffset, clockwise);
837
break;
838
case GE_VTYPE_IDX_32BIT >> GE_VTYPE_IDX_SHIFT:
839
indexGen.TranslatePrim(di.prim, di.vertexCount, (const u32_le *)di.inds, indexOffset, clockwise);
840
break;
841
}
842
}
843
decodeIndsCounter_ = i;
844
845
return indexGen.VertexCount();
846
}
847
848
bool DrawEngineCommon::CanUseHardwareTransform(int prim) const {
849
if (!useHWTransform_)
850
return false;
851
return !gstate.isModeThrough() && prim != GE_PRIM_RECTANGLES && prim > GE_PRIM_LINE_STRIP;
852
}
853
854
// Hardware tessellation additionally requires that the primitive the patch
// expands to is eligible for the hardware transform path.
bool DrawEngineCommon::CanUseHardwareTessellation(GEPatchPrimType prim) const {
	if (!useHWTessellation_) {
		return false;
	}
	return CanUseHardwareTransform(PatchPrimToPrim(prim));
}
void TessellationDataTransfer::CopyControlPoints(float *pos, float *tex, float *col, int posStride, int texStride, int colStride, const SimpleVertex *const *points, int size, u32 vertType) {
862
bool hasColor = (vertType & GE_VTYPE_COL_MASK) != 0;
863
bool hasTexCoord = (vertType & GE_VTYPE_TC_MASK) != 0;
864
865
for (int i = 0; i < size; ++i) {
866
memcpy(pos, points[i]->pos.AsArray(), 3 * sizeof(float));
867
pos += posStride;
868
}
869
if (hasTexCoord) {
870
for (int i = 0; i < size; ++i) {
871
memcpy(tex, points[i]->uv, 2 * sizeof(float));
872
tex += texStride;
873
}
874
}
875
if (hasColor) {
876
for (int i = 0; i < size; ++i) {
877
memcpy(col, Vec4f::FromRGBA(points[i]->color_32).AsArray(), 4 * sizeof(float));
878
col += colStride;
879
}
880
}
881
}
882
883
bool DrawEngineCommon::DescribeCodePtr(const u8 *ptr, std::string &name) const {
884
if (!decJitCache_ || !decJitCache_->IsInSpace(ptr)) {
885
return false;
886
}
887
888
// Loop through all the decoders and see if we have a match.
889
VertexDecoder *found = nullptr;
890
u32 foundKey;
891
892
decoderMap_.Iterate([&](u32 key, VertexDecoder *value) {
893
if (!found) {
894
if (value->IsInSpace(ptr)) {
895
foundKey = key;
896
found = value;
897
}
898
}
899
});
900
901
if (found) {
902
char temp[256];
903
found->ToString(temp, false);
904
name = temp;
905
snprintf(temp, sizeof(temp), "_%08X", foundKey);
906
name += temp;
907
return true;
908
} else {
909
return false;
910
}
911
}
912
913
// Buffer sizes for the software depth rasterizer scratch memory,
// allocated in InitDepthRaster().
enum {
	// Max vertices we transform for depth rasterization per flush.
	DEPTH_TRANSFORMED_MAX_VERTS = VERTEX_BUFFER_MAX,
	// Each transformed vertex is a 4-float xyzw vector.
	DEPTH_TRANSFORMED_BYTES = DEPTH_TRANSFORMED_MAX_VERTS * 4 * sizeof(float),
	DEPTH_SCREENVERTS_COMPONENT_COUNT = VERTEX_BUFFER_MAX,
	// One component plane (x, y or z), plus 384 extra bytes — presumably
	// overrun slack for the SIMD grouping; TODO confirm.
	DEPTH_SCREENVERTS_COMPONENT_BYTES = DEPTH_SCREENVERTS_COMPONENT_COUNT * sizeof(int) + 384,
	// Three planes: x, y and z are stored in separate parts of the array (SoA).
	DEPTH_SCREENVERTS_TOTAL_BYTES = DEPTH_SCREENVERTS_COMPONENT_BYTES * 3,
	DEPTH_INDEXBUFFER_BYTES = DEPTH_TRANSFORMED_MAX_VERTS * 3 * sizeof(uint16_t), // hmmm
};
// We process vertices for depth rendering in several stages:
923
// First, we transform and collect vertices into depthTransformed_ (4-vectors, xyzw).
924
// Then, we group and cull the vertices into four-triangle groups, which are placed in
925
// depthScreenVerts_, with x, y and z separated into different part of the array.
926
// (Alternatively, if drawing rectangles, they're just added linearly).
927
// After that, we send these groups out for SIMD setup and rasterization.
928
void DrawEngineCommon::InitDepthRaster() {
929
switch ((DepthRasterMode)g_Config.iDepthRasterMode) {
930
case DepthRasterMode::DEFAULT:
931
case DepthRasterMode::LOW_QUALITY:
932
useDepthRaster_ = PSP_CoreParameter().compat.flags().SoftwareRasterDepth;
933
break;
934
case DepthRasterMode::FORCE_ON:
935
useDepthRaster_ = true;
936
break;
937
case DepthRasterMode::OFF:
938
useDepthRaster_ = false;
939
}
940
941
if (useDepthRaster_) {
942
depthDraws_.reserve(256);
943
depthTransformed_ = (float *)AllocateMemoryPages(DEPTH_TRANSFORMED_BYTES, MEM_PROT_READ | MEM_PROT_WRITE);
944
depthScreenVerts_ = (int *)AllocateMemoryPages(DEPTH_SCREENVERTS_TOTAL_BYTES, MEM_PROT_READ | MEM_PROT_WRITE);
945
depthIndices_ = (uint16_t *)AllocateMemoryPages(DEPTH_INDEXBUFFER_BYTES, MEM_PROT_READ | MEM_PROT_WRITE);
946
}
947
}
948
949
void DrawEngineCommon::ShutdownDepthRaster() {
950
if (depthTransformed_) {
951
FreeMemoryPages(depthTransformed_, DEPTH_TRANSFORMED_BYTES);
952
}
953
if (depthScreenVerts_) {
954
FreeMemoryPages(depthScreenVerts_, DEPTH_SCREENVERTS_TOTAL_BYTES);
955
}
956
if (depthIndices_) {
957
FreeMemoryPages(depthIndices_, DEPTH_INDEXBUFFER_BYTES);
958
}
959
}
960
961
// Builds the combined world*view*projection matrix with the viewport
// scale/translate folded in, as used by the software depth rasterizer.
Mat4F32 ComputeFinalProjMatrix() {
	Mat4F32 worldview = Mul4x3By4x4(Mat4x3F32(gstate.worldMatrix), Mat4F32::Load4x3(gstate.viewMatrix));
	Mat4F32 wvp = Mul4x4By4x4(worldview, Mat4F32(gstate.projMatrix));

	const float centerOffset[4] = {
		gstate.getViewportXCenter() - gstate.getOffsetX(),
		gstate.getViewportYCenter() - gstate.getOffsetY(),
		gstate.getViewportZCenter(),
		0.0f,
	};
	// NOTE: Applying the translation actually works pre-divide, since W is also affected.
	Vec4F32 vpScale = Vec4F32::LoadF24x3_One(&gstate.viewportxscale);
	Vec4F32 vpTranslate = Vec4F32::Load(centerOffset);
	TranslateAndScaleInplace(wvp, vpScale, vpTranslate);
	return wvp;
}
// Fills in a DepthDraw record from the current GPU state, or returns false if
// this draw can't (or needn't) be handled by the software depth rasterizer.
bool DrawEngineCommon::CalculateDepthDraw(DepthDraw *draw, GEPrimitiveType prim, int vertexCount) {
	// Lines and points don't contribute useful depth on this path.
	switch (prim) {
	case GE_PRIM_INVALID:
	case GE_PRIM_KEEP_PREVIOUS:
	case GE_PRIM_LINES:
	case GE_PRIM_LINE_STRIP:
	case GE_PRIM_POINTS:
		return false;
	default:
		break;
	}

	// Map the depth test function onto our reduced compare-mode set,
	// ignoring some useless compare modes.
	switch (gstate.getDepthTestFunction()) {
	case GE_COMP_ALWAYS:
		draw->compareMode = ZCompareMode::Always;
		break;
	case GE_COMP_LEQUAL:
	case GE_COMP_LESS:
		draw->compareMode = ZCompareMode::Less;
		break;
	case GE_COMP_GEQUAL:
	case GE_COMP_GREATER:
		draw->compareMode = ZCompareMode::Greater;  // Most common
		break;
	default:
		// NEVER/EQUAL will never have a useful effect in Z-only raster, and
		// NOTEQUAL is highly unusual - skip all of them.
		return false;
	}

	if (gstate.isModeClear()) {
		if (!gstate.isClearModeDepthMask()) {
			return false;
		}
		draw->compareMode = ZCompareMode::Always;
	} else {
		// These should have been caught earlier.
		_dbg_assert_(gstate.isDepthTestEnabled());
		_dbg_assert_(gstate.isDepthWriteEnabled());
	}

	if (depthVertexCount_ + vertexCount >= DEPTH_TRANSFORMED_MAX_VERTS) {
		// Can't add more - the caller needs to flush first.
		return false;
	}

	draw->depthAddr = gstate.getDepthBufRawAddress() | 0x04000000;
	draw->depthStride = gstate.DepthBufStride();
	draw->vertexOffset = depthVertexCount_;
	draw->indexOffset = depthIndexCount_;
	draw->vertexCount = vertexCount;
	draw->cullEnabled = gstate.isCullEnabled();
	draw->cullMode = gstate.getCullMode();
	draw->prim = prim;
	draw->scissor.x1 = gstate.getScissorX1();
	draw->scissor.y1 = gstate.getScissorY1();
	draw->scissor.x2 = gstate.getScissorX2();
	draw->scissor.y2 = gstate.getScissorY2();
	return true;
}
// Transforms raw (not-yet-decoded) deferred vertex runs and queues them for
// software depth rasterization.
void DrawEngineCommon::DepthRasterSubmitRaw(GEPrimitiveType prim, const VertexDecoder *dec, uint32_t vertTypeID, int vertexCount) {
	// Outside clear mode, only draws that both test and write depth matter here.
	if (!gstate.isModeClear() && (!gstate.isDepthTestEnabled() || !gstate.isDepthWriteEnabled())) {
		return;
	}
	// Skinned/morphed vertices aren't handled on this path.
	if (vertTypeID & (GE_VTYPE_WEIGHT_MASK | GE_VTYPE_MORPHCOUNT_MASK)) {
		return;
	}

	_dbg_assert_(prim != GE_PRIM_RECTANGLES);

	DepthDraw draw;
	if (!CalculateDepthDraw(&draw, prim, vertexCount)) {
		return;
	}

	TimeCollector collectStat(&gpuStats.msPrepareDepth, coreCollectDebugStats);

	float worldviewproj[16];
	ComputeFinalProjMatrix().Store(worldviewproj);

	// Decode and transform each pending vertex run straight into depthTransformed_
	// (morphing/skinning would be applied here too, but was excluded above).
	int numDecoded = 0;
	for (int i = 0; i < numDrawVerts_; i++) {
		const DeferredVerts &dv = drawVerts_[i];
		const int runCount = dv.indexUpperBound - dv.indexLowerBound + 1;
		if (numDecoded + runCount >= DEPTH_TRANSFORMED_MAX_VERTS) {
			// Hit our limit! Should already have been caught in CalculateDepthDraw.
			break;
		}
		DecodeAndTransformForDepthRaster(depthTransformed_ + (draw.vertexOffset + numDecoded) * 4, worldviewproj, dv.verts, dv.indexLowerBound, dv.indexUpperBound, dec, vertTypeID);
		numDecoded += runCount;
	}

	// Copy indices.
	memcpy(depthIndices_ + draw.indexOffset, decIndex_, sizeof(uint16_t) * vertexCount);

	// Commit the draw to the queue.
	depthIndexCount_ += vertexCount;
	depthVertexCount_ += numDecoded;

	if (depthDraws_.empty()) {
		// First queued draw since the last flush - start the raster timer.
		rasterTimeStart_ = time_now_d();
	}
	depthDraws_.push_back(draw);
}
// Queues already-decoded vertices (from decoded_) for software depth
// rasterization. 'numDecoded' is the decoded vertex count, 'vertexCount'
// the index count in decIndex_.
void DrawEngineCommon::DepthRasterPredecoded(GEPrimitiveType prim, const void *inVerts, int numDecoded, const VertexDecoder *dec, int vertexCount) {
	// Outside clear mode, only draws that both test and write depth matter here.
	if (!gstate.isModeClear() && (!gstate.isDepthTestEnabled() || !gstate.isDepthWriteEnabled())) {
		return;
	}

	DepthDraw draw;
	if (!CalculateDepthDraw(&draw, prim, vertexCount)) {
		return;
	}

	TimeCollector collectStat(&gpuStats.msPrepareDepth, coreCollectDebugStats);

	// Make sure these have already been indexed away.
	_dbg_assert_(prim != GE_PRIM_TRIANGLE_STRIP && prim != GE_PRIM_TRIANGLE_FAN);

	if (dec->throughmode) {
		ConvertPredecodedThroughForDepthRaster(depthTransformed_ + 4 * draw.vertexOffset, decoded_, dec, numDecoded);
	} else {
		// Skinned/morphed vertices aren't handled on this path.
		if (dec->VertexType() & (GE_VTYPE_WEIGHT_MASK | GE_VTYPE_MORPHCOUNT_MASK)) {
			return;
		}
		float worldviewproj[16];
		ComputeFinalProjMatrix().Store(worldviewproj);
		TransformPredecodedForDepthRaster(depthTransformed_ + 4 * draw.vertexOffset, worldviewproj, decoded_, dec, numDecoded);
	}

	// Copy indices.
	memcpy(depthIndices_ + draw.indexOffset, decIndex_, sizeof(uint16_t) * vertexCount);

	// Commit
	depthIndexCount_ += vertexCount;
	depthVertexCount_ += numDecoded;

	// BUGFIX: Check for emptiness BEFORE pushing the draw. The old order pushed
	// first, so the queue was never empty at the check and rasterTimeStart_ was
	// never started on this path (DepthRasterSubmitRaw already uses this order).
	if (depthDraws_.empty()) {
		rasterTimeStart_ = time_now_d();
	}
	depthDraws_.push_back(draw);
	// FlushQueuedDepth();
}
void DrawEngineCommon::FlushQueuedDepth() {
1137
if (rasterTimeStart_ != 0.0) {
1138
gpuStats.msRasterTimeAvailable += time_now_d() - rasterTimeStart_;
1139
rasterTimeStart_ = 0.0;
1140
}
1141
1142
const bool collectStats = coreCollectDebugStats;
1143
const bool lowQ = g_Config.iDepthRasterMode == (int)DepthRasterMode::LOW_QUALITY;
1144
1145
for (const auto &draw : depthDraws_) {
1146
int *tx = depthScreenVerts_;
1147
int *ty = depthScreenVerts_ + DEPTH_SCREENVERTS_COMPONENT_COUNT;
1148
float *tz = (float *)(depthScreenVerts_ + DEPTH_SCREENVERTS_COMPONENT_COUNT * 2);
1149
1150
int outVertCount = 0;
1151
1152
const float *vertices = depthTransformed_ + 4 * draw.vertexOffset;
1153
const uint16_t *indices = depthIndices_ + draw.indexOffset;
1154
1155
DepthScissor tileScissor = draw.scissor.Tile(0, 1);
1156
1157
{
1158
TimeCollector collectStat(&gpuStats.msCullDepth, collectStats);
1159
switch (draw.prim) {
1160
case GE_PRIM_RECTANGLES:
1161
outVertCount = DepthRasterClipIndexedRectangles(tx, ty, tz, vertices, indices, draw, tileScissor);
1162
break;
1163
case GE_PRIM_TRIANGLES:
1164
outVertCount = DepthRasterClipIndexedTriangles(tx, ty, tz, vertices, indices, draw, tileScissor);
1165
break;
1166
default:
1167
_dbg_assert_(false);
1168
break;
1169
}
1170
}
1171
{
1172
TimeCollector collectStat(&gpuStats.msRasterizeDepth, collectStats);
1173
DepthRasterScreenVerts((uint16_t *)Memory::GetPointerWrite(draw.depthAddr), draw.depthStride, tx, ty, tz, outVertCount, draw, tileScissor, lowQ);
1174
}
1175
}
1176
1177
// Reset queue
1178
depthIndexCount_ = 0;
1179
depthVertexCount_ = 0;
1180
depthDraws_.clear();
1181
}
1182
1183