GitHub Repository: hrydgard/ppsspp
Path: blob/master/GPU/Common/DrawEngineCommon.cpp

// Copyright (c) 2013- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <algorithm>
#include <cfloat>

#include "Common/Data/Convert/ColorConv.h"
#include "Common/Profiler/Profiler.h"
#include "Common/LogReporting.h"
#include "Common/Math/CrossSIMD.h"
#include "Common/Math/lin/matrix4x4.h"
#include "Core/Config.h"
#include "GPU/Common/DrawEngineCommon.h"
#include "GPU/Common/SplineCommon.h"
#include "GPU/Common/VertexDecoderCommon.h"
#include "GPU/ge_constants.h"
#include "GPU/GPUState.h"

#define QUAD_INDICES_MAX 65536

enum {
	TRANSFORMED_VERTEX_BUFFER_SIZE = VERTEX_BUFFER_MAX * sizeof(TransformedVertex)
};

DrawEngineCommon::DrawEngineCommon() : decoderMap_(16) {
	if (g_Config.bVertexDecoderJit && (g_Config.iCpuCore == (int)CPUCore::JIT || g_Config.iCpuCore == (int)CPUCore::JIT_IR)) {
		decJitCache_ = new VertexDecoderJitCache();
	}
	transformed_ = (TransformedVertex *)AllocateMemoryPages(TRANSFORMED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	transformedExpanded_ = (TransformedVertex *)AllocateMemoryPages(3 * TRANSFORMED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	decoded_ = (u8 *)AllocateMemoryPages(DECODED_VERTEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
	decIndex_ = (u16 *)AllocateMemoryPages(DECODED_INDEX_BUFFER_SIZE, MEM_PROT_READ | MEM_PROT_WRITE);
}

DrawEngineCommon::~DrawEngineCommon() {
	FreeMemoryPages(decoded_, DECODED_VERTEX_BUFFER_SIZE);
	FreeMemoryPages(decIndex_, DECODED_INDEX_BUFFER_SIZE);
	FreeMemoryPages(transformed_, TRANSFORMED_VERTEX_BUFFER_SIZE);
	FreeMemoryPages(transformedExpanded_, 3 * TRANSFORMED_VERTEX_BUFFER_SIZE);
	delete decJitCache_;
	decoderMap_.Iterate([&](const uint32_t vtype, VertexDecoder *decoder) {
		delete decoder;
	});
	ClearSplineBezierWeights();
}

void DrawEngineCommon::Init() {
	NotifyConfigChanged();
}

VertexDecoder *DrawEngineCommon::GetVertexDecoder(u32 vtype) {
	VertexDecoder *dec;
	if (decoderMap_.Get(vtype, &dec))
		return dec;
	dec = new VertexDecoder();
	_assert_(dec);
	dec->SetVertexType(vtype, decOptions_, decJitCache_);
	decoderMap_.Insert(vtype, dec);
	return dec;
}

std::vector<std::string> DrawEngineCommon::DebugGetVertexLoaderIDs() {
	std::vector<std::string> ids;
	decoderMap_.Iterate([&](const uint32_t vtype, VertexDecoder *decoder) {
		std::string id;
		id.resize(sizeof(vtype));
		memcpy(&id[0], &vtype, sizeof(vtype));
		ids.push_back(id);
	});
	return ids;
}

std::string DrawEngineCommon::DebugGetVertexLoaderString(std::string id, DebugShaderStringType stringType) {
	u32 mapId;
	memcpy(&mapId, &id[0], sizeof(mapId));
	VertexDecoder *dec;
	if (decoderMap_.Get(mapId, &dec)) {
		return dec->GetString(stringType);
	} else {
		return "N/A";
	}
}
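
// Note on coordinate spaces: GE screen coordinates carry 4 bits of subpixel precision,
// i.e. they are in 1/16th-pixel units (0xFFFF corresponds to 4095.9375 pixels).
// That's why ClipToScreen scales by 16 and subtracts the 16x screen offset, and
// ScreenToDrawing divides by 16 again to get back to pixel-sized drawing coordinates.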
static Vec3f ClipToScreen(const Vec4f& coords) {
	float xScale = gstate.getViewportXScale();
	float xCenter = gstate.getViewportXCenter();
	float yScale = gstate.getViewportYScale();
	float yCenter = gstate.getViewportYCenter();
	float zScale = gstate.getViewportZScale();
	float zCenter = gstate.getViewportZCenter();

	float x = coords.x * xScale / coords.w + xCenter;
	float y = coords.y * yScale / coords.w + yCenter;
	float z = coords.z * zScale / coords.w + zCenter;

	// 16 = 0xFFFF / 4095.9375
	return Vec3f(x * 16 - gstate.getOffsetX16(), y * 16 - gstate.getOffsetY16(), z);
}

static Vec3f ScreenToDrawing(const Vec3f& coords) {
	Vec3f ret;
	ret.x = coords.x * (1.0f / 16.0f);
	ret.y = coords.y * (1.0f / 16.0f);
	ret.z = coords.z;
	return ret;
}

void DrawEngineCommon::NotifyConfigChanged() {
	if (decJitCache_)
		decJitCache_->Clear();
	lastVType_ = -1;
	dec_ = nullptr;
	decoderMap_.Iterate([&](const uint32_t vtype, VertexDecoder *decoder) {
		delete decoder;
	});
	decoderMap_.Clear();
	ClearTrackedVertexArrays();

	useHWTransform_ = g_Config.bHardwareTransform;
	useHWTessellation_ = UpdateUseHWTessellation(g_Config.bHardwareTessellation);
	decOptions_.applySkinInDecode = g_Config.bSoftwareSkinning;
}

u32 DrawEngineCommon::NormalizeVertices(u8 *outPtr, u8 *bufPtr, const u8 *inPtr, int lowerBound, int upperBound, u32 vertType, int *vertexSize) {
	const u32 vertTypeID = GetVertTypeID(vertType, gstate.getUVGenMode(), decOptions_.applySkinInDecode);
	VertexDecoder *dec = GetVertexDecoder(vertTypeID);
	if (vertexSize)
		*vertexSize = dec->VertexSize();
	return DrawEngineCommon::NormalizeVertices(outPtr, bufPtr, inPtr, dec, lowerBound, upperBound, vertType);
}

void DrawEngineCommon::DispatchSubmitImm(GEPrimitiveType prim, TransformedVertex *buffer, int vertexCount, int cullMode, bool continuation) {
	// Instead of plumbing through properly (we'd need to inject these pretransformed vertices in the middle
	// of SoftwareTransform(), which would take a lot of refactoring), we'll cheat and just turn these into
	// through vertices.
	// Since the only known use is Thrillville and it only uses it to clear, we just use color and pos.
	struct ImmVertex {
		float uv[2];
		uint32_t color;
		float xyz[3];
	};
	std::vector<ImmVertex> temp;
	temp.resize(vertexCount);
	uint32_t color1Used = 0;
	for (int i = 0; i < vertexCount; i++) {
		// Since we're sending through, scale back up to w/h.
		temp[i].uv[0] = buffer[i].u * gstate.getTextureWidth(0);
		temp[i].uv[1] = buffer[i].v * gstate.getTextureHeight(0);
		temp[i].color = buffer[i].color0_32;
		temp[i].xyz[0] = buffer[i].pos[0];
		temp[i].xyz[1] = buffer[i].pos[1];
		temp[i].xyz[2] = buffer[i].pos[2];
		color1Used |= buffer[i].color1_32;
	}
	int vtype = GE_VTYPE_TC_FLOAT | GE_VTYPE_POS_FLOAT | GE_VTYPE_COL_8888 | GE_VTYPE_THROUGH;
	// TODO: Handle fog and secondary color somehow?

	if (gstate.isFogEnabled() && !gstate.isModeThrough()) {
		WARN_LOG_REPORT_ONCE(geimmfog, Log::G3D, "Imm vertex used fog");
	}
	if (color1Used != 0 && gstate.isUsingSecondaryColor() && !gstate.isModeThrough()) {
		WARN_LOG_REPORT_ONCE(geimmcolor1, Log::G3D, "Imm vertex used secondary color");
	}

	bool prevThrough = gstate.isModeThrough();
	// Code checks this reg directly, not just the vtype ID.
	if (!prevThrough) {
		gstate.vertType |= GE_VTYPE_THROUGH;
		gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_RASTER_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_CULLRANGE);
	}

	int bytesRead;
	uint32_t vertTypeID = GetVertTypeID(vtype, 0, decOptions_.applySkinInDecode);

	bool clockwise = !gstate.isCullEnabled() || gstate.getCullMode() == cullMode;
	SubmitPrim(&temp[0], nullptr, prim, vertexCount, vertTypeID, clockwise, &bytesRead);
	DispatchFlush();

	if (!prevThrough) {
		gstate.vertType &= ~GE_VTYPE_THROUGH;
		gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_RASTER_STATE | DIRTY_VIEWPORTSCISSOR_STATE | DIRTY_CULLRANGE);
	}
}

// Gated by DIRTY_CULL_PLANES
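// Builds the frustum/cull planes from view * projection, additionally fitted to the
// current offset + scissor/region rectangle via the applyViewport matrix below.
// The world matrix is not folded in here, so the resulting planes are tested against
// world-space positions (see TestBoundingBox / TestBoundingBoxFast).
// Each plane is stored as (a, b, c, d); a point is on the inside of it when a*x + b*y + c*z + d >= 0.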
void DrawEngineCommon::UpdatePlanes() {
	float view[16];
	float viewproj[16];
	ConvertMatrix4x3To4x4(view, gstate.viewMatrix);
	Matrix4ByMatrix4(viewproj, view, gstate.projMatrix);

	// Next, we need to apply viewport, scissor, region, and even offset - but only for X/Y.
	// Note that the PSP does not clip against the viewport.
	const Vec2f baseOffset = Vec2f(gstate.getOffsetX(), gstate.getOffsetY());
	// Region1 (rate) is used as an X1/Y1 here, matching PSP behavior.
	minOffset_ = baseOffset + Vec2f(std::max(gstate.getRegionRateX() - 0x100, gstate.getScissorX1()), std::max(gstate.getRegionRateY() - 0x100, gstate.getScissorY1())) - Vec2f(1.0f, 1.0f);
	maxOffset_ = baseOffset + Vec2f(std::min(gstate.getRegionX2(), gstate.getScissorX2()), std::min(gstate.getRegionY2(), gstate.getScissorY2())) + Vec2f(1.0f, 1.0f);

	// Let's not handle these special cases in the fast culler.
	offsetOutsideEdge_ = maxOffset_.x >= 4096.0f || minOffset_.x < 1.0f || minOffset_.y < 1.0f || maxOffset_.y >= 4096.0f;

	// Now let's apply the viewport to our scissor/region + offset range.
	Vec2f inverseViewportScale = Vec2f(1.0f / gstate.getViewportXScale(), 1.0f / gstate.getViewportYScale());
	Vec2f minViewport = (minOffset_ - Vec2f(gstate.getViewportXCenter(), gstate.getViewportYCenter())) * inverseViewportScale;
	Vec2f maxViewport = (maxOffset_ - Vec2f(gstate.getViewportXCenter(), gstate.getViewportYCenter())) * inverseViewportScale;

	Vec2f viewportInvSize = Vec2f(1.0f / (maxViewport.x - minViewport.x), 1.0f / (maxViewport.y - minViewport.y));

	Lin::Matrix4x4 applyViewport{};
	// Scale to the viewport's size.
	applyViewport.xx = 2.0f * viewportInvSize.x;
	applyViewport.yy = 2.0f * viewportInvSize.y;
	applyViewport.zz = 1.0f;
	applyViewport.ww = 1.0f;
	// And offset to the viewport's centers.
	applyViewport.wx = -(maxViewport.x + minViewport.x) * viewportInvSize.x;
	applyViewport.wy = -(maxViewport.y + minViewport.y) * viewportInvSize.y;

	float mtx[16];
	Matrix4ByMatrix4(mtx, viewproj, applyViewport.m);
	// I'm sure there's some fairly optimized way to set these.
	planes_.Set(0, mtx[3] - mtx[0], mtx[7] - mtx[4], mtx[11] - mtx[8], mtx[15] - mtx[12]);  // Right
	planes_.Set(1, mtx[3] + mtx[0], mtx[7] + mtx[4], mtx[11] + mtx[8], mtx[15] + mtx[12]);  // Left
	planes_.Set(2, mtx[3] + mtx[1], mtx[7] + mtx[5], mtx[11] + mtx[9], mtx[15] + mtx[13]);  // Bottom
	planes_.Set(3, mtx[3] - mtx[1], mtx[7] - mtx[5], mtx[11] - mtx[9], mtx[15] - mtx[13]);  // Top
	planes_.Set(4, mtx[3] + mtx[2], mtx[7] + mtx[6], mtx[11] + mtx[10], mtx[15] + mtx[14]);  // Near
	planes_.Set(5, mtx[3] - mtx[2], mtx[7] - mtx[6], mtx[11] - mtx[10], mtx[15] - mtx[14]);  // Far
}

// This code has plenty of potential for optimization.
//
// It does the simplest and safest test possible: If all points of a bbox are outside a single one of
// our clipping planes, we reject the box. Tighter bounds would be desirable but would take more calculations.
// The name is a slight misnomer, because any bounding shape will work, not just boxes.
//
// Potential optimizations:
// * SIMD-ify the plane culling, and also the vertex data conversion (could even group together xxxxyyyyzzzz for example)
// * Compute min/max of the verts, and then compute a bounding sphere and check that against the planes.
//   - Less accurate, but..
//   - Only requires six plane evaluations then.

bool DrawEngineCommon::TestBoundingBox(const void *vdata, const void *inds, int vertexCount, u32 vertType) {
	// Grab temp buffer space from large offsets in decoded_. Not exactly safe for large draws.
	if (vertexCount > 1024) {
		return true;
	}

	SimpleVertex *corners = (SimpleVertex *)(decoded_ + 65536 * 12);
	float *verts = (float *)(decoded_ + 65536 * 18);

	// Although this may lead to drawing that shouldn't happen, the viewport is more complex on VR.
	// Let's always say objects are within bounds.
	if (gstate_c.Use(GPU_USE_VIRTUAL_REALITY))
		return true;

	// Due to world matrix updates per "thing", this isn't quite as effective as it could be if we did world transform
	// in here as well. Though, it still does cut down on a lot of updates in Tekken 6.
	if (gstate_c.IsDirty(DIRTY_CULL_PLANES)) {
		UpdatePlanes();
		gpuStats.numPlaneUpdates++;
		gstate_c.Clean(DIRTY_CULL_PLANES);
	}

	// Try to skip NormalizeVertices if it's pure positions. No need to bother with a vertex decoder
	// and a large vertex format.
	if ((vertType & 0xFFFFFF) == GE_VTYPE_POS_FLOAT && !inds) {
		memcpy(verts, vdata, sizeof(float) * 3 * vertexCount);
	} else if ((vertType & 0xFFFFFF) == GE_VTYPE_POS_8BIT && !inds) {
		const s8 *vtx = (const s8 *)vdata;
		for (int i = 0; i < vertexCount * 3; i++) {
			verts[i] = vtx[i] * (1.0f / 128.0f);
		}
	} else if ((vertType & 0xFFFFFF) == GE_VTYPE_POS_16BIT && !inds) {
		const s16 *vtx = (const s16 *)vdata;
		for (int i = 0; i < vertexCount * 3; i++) {
			verts[i] = vtx[i] * (1.0f / 32768.0f);
		}
	} else {
		// Simplify away indices, bones, and morph before proceeding.
		u8 *temp_buffer = decoded_ + 65536 * 24;

		if ((inds || (vertType & (GE_VTYPE_WEIGHT_MASK | GE_VTYPE_MORPHCOUNT_MASK)))) {
			u16 indexLowerBound = 0;
			u16 indexUpperBound = (u16)vertexCount - 1;

			if (vertexCount > 0 && inds) {
				GetIndexBounds(inds, vertexCount, vertType, &indexLowerBound, &indexUpperBound);
			}
			// TODO: Avoid normalization if just plain skinning.
			// Force software skinning.
			bool wasApplyingSkinInDecode = decOptions_.applySkinInDecode;
			decOptions_.applySkinInDecode = true;
			NormalizeVertices((u8 *)corners, temp_buffer, (const u8 *)vdata, indexLowerBound, indexUpperBound, vertType);
			decOptions_.applySkinInDecode = wasApplyingSkinInDecode;

			IndexConverter conv(vertType, inds);
			for (int i = 0; i < vertexCount; i++) {
				verts[i * 3] = corners[conv(i)].pos.x;
				verts[i * 3 + 1] = corners[conv(i)].pos.y;
				verts[i * 3 + 2] = corners[conv(i)].pos.z;
			}
		} else {
			// Simple, most common case.
			VertexDecoder *dec = GetVertexDecoder(vertType);
			int stride = dec->VertexSize();
			int offset = dec->posoff;
			switch (vertType & GE_VTYPE_POS_MASK) {
			case GE_VTYPE_POS_8BIT:
				for (int i = 0; i < vertexCount; i++) {
					const s8 *data = (const s8 *)vdata + i * stride + offset;
					for (int j = 0; j < 3; j++) {
						verts[i * 3 + j] = data[j] * (1.0f / 128.0f);
					}
				}
				break;
			case GE_VTYPE_POS_16BIT:
				for (int i = 0; i < vertexCount; i++) {
					const s16 *data = ((const s16 *)((const s8 *)vdata + i * stride + offset));
					for (int j = 0; j < 3; j++) {
						verts[i * 3 + j] = data[j] * (1.0f / 32768.0f);
					}
				}
				break;
			case GE_VTYPE_POS_FLOAT:
				for (int i = 0; i < vertexCount; i++)
					memcpy(&verts[i * 3], (const u8 *)vdata + stride * i + offset, sizeof(float) * 3);
				break;
			}
		}
	}

	// Pretransform the verts in-place so we don't have to do it inside the loop.
	// We do this differently in the fast version below since we skip the max/minOffset checks there,
	// making it easier to get the whole thing ready for SIMD.
	for (int i = 0; i < vertexCount; i++) {
		float worldpos[3];
		Vec3ByMatrix43(worldpos, &verts[i * 3], gstate.worldMatrix);
		memcpy(&verts[i * 3], worldpos, 12);
	}

	// Note: near/far are not checked without clamp/clip enabled, so we skip those planes.
	int totalPlanes = gstate.isDepthClampEnabled() ? 6 : 4;
	for (int plane = 0; plane < totalPlanes; plane++) {
		int inside = 0;
		int out = 0;
		for (int i = 0; i < vertexCount; i++) {
			// Test against the frustum planes, and count.
			// TODO: We should test 4 vertices at a time using SIMD.
			// We could also test one vertex against 4 planes at a time, though that's a bit wasteful in the common case of 6.
			const float *worldpos = verts + i * 3;
			float value = planes_.Test(plane, worldpos);
			if (value <= -FLT_EPSILON)  // Not sure why we use exactly this value. Probably '< 0' would do.
				out++;
			else
				inside++;
		}

		// No vertices inside this one plane? Don't need to draw.
		if (inside == 0) {
			// All out - but check for X and Y if the offset was near the cullbox edge.
			bool outsideEdge = false;
			switch (plane) {
			case 0: outsideEdge = maxOffset_.x >= 4096.0f; break;
			case 1: outsideEdge = minOffset_.x < 1.0f; break;
			case 2: outsideEdge = minOffset_.y < 1.0f; break;
			case 3: outsideEdge = maxOffset_.y >= 4096.0f; break;
			}

			// Only consider this outside if offset + scissor/region is fully inside the cullbox.
			if (!outsideEdge)
				return false;
		}

		// Any out. For testing that the planes are in the right locations.
		// if (out != 0) return false;
	}
	return true;
}

// NOTE: This doesn't handle through-mode, indexing, morph, or skinning.
bool DrawEngineCommon::TestBoundingBoxFast(const void *vdata, int vertexCount, u32 vertType) {
	SimpleVertex *corners = (SimpleVertex *)(decoded_ + 65536 * 12);
	float *verts = (float *)(decoded_ + 65536 * 18);

	// Although this may lead to drawing that shouldn't happen, the viewport is more complex on VR.
	// Let's always say objects are within bounds.
	if (gstate_c.Use(GPU_USE_VIRTUAL_REALITY))
		return true;

	// Due to world matrix updates per "thing", this isn't quite as effective as it could be if we did world transform
	// in here as well. Though, it still does cut down on a lot of updates in Tekken 6.
	if (gstate_c.IsDirty(DIRTY_CULL_PLANES)) {
		UpdatePlanes();
		gpuStats.numPlaneUpdates++;
		gstate_c.Clean(DIRTY_CULL_PLANES);
	}

	// Also let's just bail if offsetOutsideEdge_ is set, instead of handling those cases.
	// NOTE: This is written to in UpdatePlanes, so we can't check it before that has run.
	if (offsetOutsideEdge_)
		return true;

	// Simple, most common case.
	VertexDecoder *dec = GetVertexDecoder(vertType);
	int stride = dec->VertexSize();
	int offset = dec->posoff;
	int vertStride = 3;

	// TODO: Possibly do the plane tests directly against the source formats instead of converting.
	switch (vertType & GE_VTYPE_POS_MASK) {
	case GE_VTYPE_POS_8BIT:
		for (int i = 0; i < vertexCount; i++) {
			const s8 *data = (const s8 *)vdata + i * stride + offset;
			for (int j = 0; j < 3; j++) {
				verts[i * 3 + j] = data[j] * (1.0f / 128.0f);
			}
		}
		break;
	case GE_VTYPE_POS_16BIT:
	{
#if PPSSPP_ARCH(SSE2)
		__m128 scaleFactor = _mm_set1_ps(1.0f / 32768.0f);
		for (int i = 0; i < vertexCount; i++) {
			const s16 *data = ((const s16 *)((const s8 *)vdata + i * stride + offset));
			__m128i bits = _mm_castpd_si128(_mm_load_sd((const double *)data));
			// Sign extension. Hacky without SSE4.
			bits = _mm_srai_epi32(_mm_unpacklo_epi16(bits, bits), 16);
			__m128 pos = _mm_mul_ps(_mm_cvtepi32_ps(bits), scaleFactor);
			_mm_storeu_ps(verts + i * 3, pos);  // TODO: use stride 4 to avoid clashing writes?
		}
#elif PPSSPP_ARCH(ARM_NEON)
		for (int i = 0; i < vertexCount; i++) {
			const s16 *dataPtr = ((const s16 *)((const s8 *)vdata + i * stride + offset));
			int32x4_t data = vmovl_s16(vld1_s16(dataPtr));
			float32x4_t pos = vcvtq_n_f32_s32(data, 15);  // >> 15 = division by 32768.0f
			vst1q_f32(verts + i * 3, pos);
		}
#else
		for (int i = 0; i < vertexCount; i++) {
			const s16 *data = ((const s16 *)((const s8 *)vdata + i * stride + offset));
			for (int j = 0; j < 3; j++) {
				verts[i * 3 + j] = data[j] * (1.0f / 32768.0f);
			}
		}
#endif
		break;
	}
	case GE_VTYPE_POS_FLOAT:
		// No need to copy in this case, we can just read directly from the source format with a stride.
		verts = (float *)((uint8_t *)vdata + offset);
		vertStride = stride / 4;
		break;
	}

	// We only check the 4 sides. Near/far won't likely make a huge difference.
	// We test one vertex against 4 planes to get some SIMD. Vertices need to be transformed to world space
	// for testing, and we don't want to re-do that, so we use that as the "pivot" of the data.
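	// Note: the unaligned 4-wide loads of the 4x3 world matrix below (offsets 0, 3, 6, 9)
	// each pick up one float beyond their 3-float column in the top lane. That lane never
	// affects the result, since only the x/y/z lanes of the computed worldpos feed the plane test.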
#if PPSSPP_ARCH(SSE2)
	const __m128 worldX = _mm_loadu_ps(gstate.worldMatrix);
	const __m128 worldY = _mm_loadu_ps(gstate.worldMatrix + 3);
	const __m128 worldZ = _mm_loadu_ps(gstate.worldMatrix + 6);
	const __m128 worldW = _mm_loadu_ps(gstate.worldMatrix + 9);
	const __m128 planeX = _mm_loadu_ps(planes_.x);
	const __m128 planeY = _mm_loadu_ps(planes_.y);
	const __m128 planeZ = _mm_loadu_ps(planes_.z);
	const __m128 planeW = _mm_loadu_ps(planes_.w);
	__m128 inside = _mm_set1_ps(0.0f);
	for (int i = 0; i < vertexCount; i++) {
		const float *pos = verts + i * vertStride;
		__m128 worldpos = _mm_add_ps(
			_mm_add_ps(
				_mm_mul_ps(worldX, _mm_set1_ps(pos[0])),
				_mm_mul_ps(worldY, _mm_set1_ps(pos[1]))
			),
			_mm_add_ps(
				_mm_mul_ps(worldZ, _mm_set1_ps(pos[2])),
				worldW
			)
		);
		// OK, now we check it against the four planes.
		// This is really curiously similar to a matrix multiplication (well, it is one).
		__m128 posX = _mm_shuffle_ps(worldpos, worldpos, _MM_SHUFFLE(0, 0, 0, 0));
		__m128 posY = _mm_shuffle_ps(worldpos, worldpos, _MM_SHUFFLE(1, 1, 1, 1));
		__m128 posZ = _mm_shuffle_ps(worldpos, worldpos, _MM_SHUFFLE(2, 2, 2, 2));
		__m128 planeDist = _mm_add_ps(
			_mm_add_ps(
				_mm_mul_ps(planeX, posX),
				_mm_mul_ps(planeY, posY)
			),
			_mm_add_ps(
				_mm_mul_ps(planeZ, posZ),
				planeW
			)
		);
		inside = _mm_or_ps(inside, _mm_cmpge_ps(planeDist, _mm_setzero_ps()));
	}
	// 0xF means that we found at least one vertex inside every one of the planes.
	// We don't bother with counts, though it wouldn't be hard if we had a use for them.
	return _mm_movemask_ps(inside) == 0xF;
#elif PPSSPP_ARCH(ARM_NEON)
	const float32x4_t worldX = vld1q_f32(gstate.worldMatrix);
	const float32x4_t worldY = vld1q_f32(gstate.worldMatrix + 3);
	const float32x4_t worldZ = vld1q_f32(gstate.worldMatrix + 6);
	const float32x4_t worldW = vld1q_f32(gstate.worldMatrix + 9);
	const float32x4_t planeX = vld1q_f32(planes_.x);
	const float32x4_t planeY = vld1q_f32(planes_.y);
	const float32x4_t planeZ = vld1q_f32(planes_.z);
	const float32x4_t planeW = vld1q_f32(planes_.w);
	uint32x4_t inside = vdupq_n_u32(0);
	for (int i = 0; i < vertexCount; i++) {
		const float *pos = verts + i * vertStride;
		float32x4_t objpos = vld1q_f32(pos);
		float32x4_t worldpos = vaddq_f32(
			vmlaq_laneq_f32(
				vmulq_laneq_f32(worldX, objpos, 0),
				worldY, objpos, 1),
			vmlaq_laneq_f32(worldW, worldZ, objpos, 2)
		);
		// OK, now we check it against the four planes.
		// This is really curiously similar to a matrix multiplication (well, it is one).
		float32x4_t planeDist = vaddq_f32(
			vmlaq_laneq_f32(
				vmulq_laneq_f32(planeX, worldpos, 0),
				planeY, worldpos, 1),
			vmlaq_laneq_f32(planeW, planeZ, worldpos, 2)
		);
		inside = vorrq_u32(inside, vcgezq_f32(planeDist));
	}
	uint64_t insideBits = vget_lane_u64(vreinterpret_u64_u16(vmovn_u32(inside)), 0);
	return ~insideBits == 0;  // InsideBits all ones means that we found at least one vertex inside every one of the planes. We don't bother with counts, though it wouldn't be hard.
#else
	int inside[4]{};
	for (int i = 0; i < vertexCount; i++) {
		const float *pos = verts + i * vertStride;
		float worldpos[3];
		Vec3ByMatrix43(worldpos, pos, gstate.worldMatrix);
		for (int plane = 0; plane < 4; plane++) {
			float value = planes_.Test(plane, worldpos);
			if (value >= 0.0f)
				inside[plane]++;
		}
	}

	for (int plane = 0; plane < 4; plane++) {
		if (inside[plane] == 0) {
			return false;
		}
	}
#endif
	return true;
}

// TODO: This probably is not the best interface.
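// Extracts the current draw's vertices (and indices, normalized to 16-bit) into a simple
// format, mainly for debugger-style inspection - hence GPUDebugVertex. Non-through verts
// are also projected all the way down to drawing coordinates.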
bool DrawEngineCommon::GetCurrentSimpleVertices(int count, std::vector<GPUDebugVertex> &vertices, std::vector<u16> &indices) {
	// This is always for the current vertices.
	u16 indexLowerBound = 0;
	u16 indexUpperBound = count - 1;

	if (!Memory::IsValidAddress(gstate_c.vertexAddr) || count == 0)
		return false;

	bool savedVertexFullAlpha = gstate_c.vertexFullAlpha;

	if ((gstate.vertType & GE_VTYPE_IDX_MASK) != GE_VTYPE_IDX_NONE) {
		const u8 *inds = Memory::GetPointer(gstate_c.indexAddr);
		const u16_le *inds16 = (const u16_le *)inds;
		const u32_le *inds32 = (const u32_le *)inds;

		if (inds) {
			GetIndexBounds(inds, count, gstate.vertType, &indexLowerBound, &indexUpperBound);
			indices.resize(count);
			switch (gstate.vertType & GE_VTYPE_IDX_MASK) {
			case GE_VTYPE_IDX_8BIT:
				for (int i = 0; i < count; ++i) {
					indices[i] = inds[i];
				}
				break;
			case GE_VTYPE_IDX_16BIT:
				for (int i = 0; i < count; ++i) {
					indices[i] = inds16[i];
				}
				break;
			case GE_VTYPE_IDX_32BIT:
				WARN_LOG_REPORT_ONCE(simpleIndexes32, Log::G3D, "SimpleVertices: Decoding 32-bit indexes");
				for (int i = 0; i < count; ++i) {
					// These aren't documented and should be rare. Let's bounds check each one.
					if (inds32[i] != (u16)inds32[i]) {
						ERROR_LOG_REPORT_ONCE(simpleIndexes32Bounds, Log::G3D, "SimpleVertices: Index outside 16-bit range");
					}
					indices[i] = (u16)inds32[i];
				}
				break;
			}
		} else {
			indices.clear();
		}
	} else {
		indices.clear();
	}

	static std::vector<u32> temp_buffer;
	static std::vector<SimpleVertex> simpleVertices;
	temp_buffer.resize(std::max((int)indexUpperBound, 8192) * 128 / sizeof(u32));
	simpleVertices.resize(indexUpperBound + 1);
	NormalizeVertices((u8 *)(&simpleVertices[0]), (u8 *)(&temp_buffer[0]), Memory::GetPointerUnchecked(gstate_c.vertexAddr), indexLowerBound, indexUpperBound, gstate.vertType);

	float world[16];
	float view[16];
	float worldview[16];
	float worldviewproj[16];
	ConvertMatrix4x3To4x4(world, gstate.worldMatrix);
	ConvertMatrix4x3To4x4(view, gstate.viewMatrix);
	Matrix4ByMatrix4(worldview, world, view);
	Matrix4ByMatrix4(worldviewproj, worldview, gstate.projMatrix);

	vertices.resize(indexUpperBound + 1);
	uint32_t vertType = gstate.vertType;
	for (int i = indexLowerBound; i <= indexUpperBound; ++i) {
		const SimpleVertex &vert = simpleVertices[i];

		if ((vertType & GE_VTYPE_THROUGH) != 0) {
			if (vertType & GE_VTYPE_TC_MASK) {
				vertices[i].u = vert.uv[0];
				vertices[i].v = vert.uv[1];
			} else {
				vertices[i].u = 0.0f;
				vertices[i].v = 0.0f;
			}
			vertices[i].x = vert.pos.x;
			vertices[i].y = vert.pos.y;
			vertices[i].z = vert.pos.z;
			if (vertType & GE_VTYPE_COL_MASK) {
				memcpy(vertices[i].c, vert.color, sizeof(vertices[i].c));
			} else {
				memset(vertices[i].c, 0, sizeof(vertices[i].c));
			}
			vertices[i].nx = 0;  // No meaningful normals in through mode
			vertices[i].ny = 0;
			vertices[i].nz = 1.0f;
		} else {
			float clipPos[4];
			Vec3ByMatrix44(clipPos, vert.pos.AsArray(), worldviewproj);
			Vec3f screenPos = ClipToScreen(clipPos);
			Vec3f drawPos = ScreenToDrawing(screenPos);

			if (vertType & GE_VTYPE_TC_MASK) {
				vertices[i].u = vert.uv[0] * (float)gstate.getTextureWidth(0);
				vertices[i].v = vert.uv[1] * (float)gstate.getTextureHeight(0);
			} else {
				vertices[i].u = 0.0f;
				vertices[i].v = 0.0f;
			}
			// Should really have separate coordinates for before and after transform.
			vertices[i].x = drawPos.x;
			vertices[i].y = drawPos.y;
			vertices[i].z = drawPos.z;
			if (vertType & GE_VTYPE_COL_MASK) {
				memcpy(vertices[i].c, vert.color, sizeof(vertices[i].c));
			} else {
				memset(vertices[i].c, 0, sizeof(vertices[i].c));
			}
			vertices[i].nx = vert.nrm.x;
			vertices[i].ny = vert.nrm.y;
			vertices[i].nz = vert.nrm.z;
		}
	}

	gstate_c.vertexFullAlpha = savedVertexFullAlpha;

	return true;
}

// This normalizes a set of vertices in any format to SimpleVertex format, by processing away morphing AND skinning.
// The rest of the transform pipeline like lighting will go as normal, either hardware or software.
// The implementation is initially a bit inefficient but shouldn't be a big deal.
// An intermediate buffer of not-easy-to-predict size is stored at bufPtr.
u32 DrawEngineCommon::NormalizeVertices(u8 *outPtr, u8 *bufPtr, const u8 *inPtr, VertexDecoder *dec, int lowerBound, int upperBound, u32 vertType) {
	// First, decode the vertices into a GPU-compatible format. This step could be eliminated but would need a separate
	// implementation of the vertex decoder.
	dec->DecodeVerts(bufPtr, inPtr, &gstate_c.uv, lowerBound, upperBound);

	// OK, morphing eliminated but bones still remain to be taken care of.
	// Let's do a partial software transform where we only do skinning.

	VertexReader reader(bufPtr, dec->GetDecVtxFmt(), vertType);

	SimpleVertex *sverts = (SimpleVertex *)outPtr;

	const u8 defaultColor[4] = {
		(u8)gstate.getMaterialAmbientR(),
		(u8)gstate.getMaterialAmbientG(),
		(u8)gstate.getMaterialAmbientB(),
		(u8)gstate.getMaterialAmbientA(),
	};

	// Let's have two separate loops, one for non-skinning and one for skinning.
	if (!dec->skinInDecode && (vertType & GE_VTYPE_WEIGHT_MASK) != GE_VTYPE_WEIGHT_NONE) {
		int numBoneWeights = vertTypeGetNumBoneWeights(vertType);
		for (int i = lowerBound; i <= upperBound; i++) {
			reader.Goto(i - lowerBound);
			SimpleVertex &sv = sverts[i];
			if (vertType & GE_VTYPE_TC_MASK) {
				reader.ReadUV(sv.uv);
			}

			if (vertType & GE_VTYPE_COL_MASK) {
				sv.color_32 = reader.ReadColor0_8888();
			} else {
				memcpy(sv.color, defaultColor, 4);
			}

			float nrm[3], pos[3];
			float bnrm[3], bpos[3];

			if (vertType & GE_VTYPE_NRM_MASK) {
				// Normals are generated during tessellation anyway, not sure if there's any need to supply them.
				reader.ReadNrm(nrm);
			} else {
				nrm[0] = 0;
				nrm[1] = 0;
				nrm[2] = 1.0f;
			}
			reader.ReadPos(pos);

			// Apply skinning transform directly
			float weights[8];
			reader.ReadWeights(weights);
			// Skinning
			Vec3Packedf psum(0, 0, 0);
			Vec3Packedf nsum(0, 0, 0);
			for (int w = 0; w < numBoneWeights; w++) {
				if (weights[w] != 0.0f) {
					Vec3ByMatrix43(bpos, pos, gstate.boneMatrix + w * 12);
					Vec3Packedf tpos(bpos);
					psum += tpos * weights[w];

					Norm3ByMatrix43(bnrm, nrm, gstate.boneMatrix + w * 12);
					Vec3Packedf tnorm(bnrm);
					nsum += tnorm * weights[w];
				}
			}
			sv.pos = psum;
			sv.nrm = nsum;
		}
	} else {
		for (int i = lowerBound; i <= upperBound; i++) {
			reader.Goto(i - lowerBound);
			SimpleVertex &sv = sverts[i];
			if (vertType & GE_VTYPE_TC_MASK) {
				reader.ReadUV(sv.uv);
			} else {
				sv.uv[0] = 0.0f;  // This will get filled in during tessellation
				sv.uv[1] = 0.0f;
			}
			if (vertType & GE_VTYPE_COL_MASK) {
				sv.color_32 = reader.ReadColor0_8888();
			} else {
				memcpy(sv.color, defaultColor, 4);
			}
			if (vertType & GE_VTYPE_NRM_MASK) {
				// Normals are generated during tessellation anyway, not sure if there's any need to supply them.
				reader.ReadNrm((float *)&sv.nrm);
			} else {
				sv.nrm.x = 0.0f;
				sv.nrm.y = 0.0f;
				sv.nrm.z = 1.0f;
			}
			reader.ReadPos((float *)&sv.pos);
		}
	}

	// Okay, there we are! Return the new type (but keep the index and through bits).
	return GE_VTYPE_TC_FLOAT | GE_VTYPE_COL_8888 | GE_VTYPE_NRM_FLOAT | GE_VTYPE_POS_FLOAT | (vertType & (GE_VTYPE_IDX_MASK | GE_VTYPE_THROUGH));
}
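
// Picks how a shader that needs to read the current render target gets its data: direct
// framebuffer fetch where the GPU supports it, otherwise a copy of the framebuffer bound as a texture.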
void DrawEngineCommon::ApplyFramebufferRead(FBOTexState *fboTexState) {
	if (gstate_c.Use(GPU_USE_FRAMEBUFFER_FETCH)) {
		*fboTexState = FBO_TEX_READ_FRAMEBUFFER;
	} else {
		gpuStats.numCopiesForShaderBlend++;
		*fboTexState = FBO_TEX_COPY_BIND_TEX;
	}

	gstate_c.Dirty(DIRTY_SHADERBLEND);
}

int DrawEngineCommon::ComputeNumVertsToDecode() const {
	int sum = 0;
	for (int i = 0; i < numDrawVerts_; i++) {
		sum += drawVerts_[i].indexUpperBound + 1 - drawVerts_[i].indexLowerBound;
	}
	return sum;
}
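
// Scans ahead in the display list and folds consecutive non-indexed PRIM commands of the
// same class (triangle vs. non-triangle) into the current deferred draw, so they can be
// decoded and drawn as one batch. Returns the number of command words consumed.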
int DrawEngineCommon::ExtendNonIndexedPrim(const uint32_t *cmd, const uint32_t *stall, u32 vertTypeID, bool clockwise, int *bytesRead, bool isTriangle) {
	const uint32_t *start = cmd;
	int prevDrawVerts = numDrawVerts_ - 1;
	DeferredVerts &dv = drawVerts_[prevDrawVerts];
	int offset = dv.vertexCount;

	_dbg_assert_(numDrawInds_ <= MAX_DEFERRED_DRAW_INDS);  // if it's equal, the check below will take care of it before any action is taken.
	_dbg_assert_(numDrawVerts_ > 0);

	if (!clockwise) {
		anyCCWOrIndexed_ = true;
	}
	int seenPrims = 0;
	while (cmd != stall) {
		uint32_t data = *cmd;
		// Stop as soon as the next command isn't another PRIM (0x04).
		if ((data & 0xFFF80000) != 0x04000000) {
			break;
		}
		GEPrimitiveType newPrim = static_cast<GEPrimitiveType>((data >> 16) & 7);
		if (IsTrianglePrim(newPrim) != isTriangle)
			break;
		int vertexCount = data & 0xFFFF;
		if (numDrawInds_ >= MAX_DEFERRED_DRAW_INDS || vertexCountInDrawCalls_ + offset + vertexCount > VERTEX_BUFFER_MAX) {
			break;
		}
		DeferredInds &di = drawInds_[numDrawInds_++];
		di.indexType = 0;
		di.prim = newPrim;
		seenPrims |= (1 << newPrim);
		di.clockwise = clockwise;
		di.vertexCount = vertexCount;
		di.vertDecodeIndex = prevDrawVerts;
		di.offset = offset;
		offset += vertexCount;
		cmd++;
	}

	seenPrims_ |= seenPrims;

	int totalCount = offset - dv.vertexCount;
	dv.vertexCount = offset;
	dv.indexUpperBound = dv.vertexCount - 1;
	vertexCountInDrawCalls_ += totalCount;
	*bytesRead = totalCount * dec_->VertexSize();
	return cmd - start;
}
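
// Like SubmitPrim, but only works out how many bytes of vertex data the command consumes
// (keeping the prim/decoder state consistent) without recording a draw - used on paths
// where the draw itself is being skipped.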
void DrawEngineCommon::SkipPrim(GEPrimitiveType prim, int vertexCount, u32 vertTypeID, int *bytesRead) {
	if (!indexGen.PrimCompatible(prevPrim_, prim)) {
		DispatchFlush();
	}

	// This isn't exactly right if we flushed, since prims can straddle previous calls.
	// But it generally works for common usage.
	if (prim == GE_PRIM_KEEP_PREVIOUS) {
		// Has to be set to something, let's assume POINTS (0) if no previous.
		if (prevPrim_ == GE_PRIM_INVALID)
			prevPrim_ = GE_PRIM_POINTS;
		prim = prevPrim_;
	} else {
		prevPrim_ = prim;
	}

	// If vtype has changed, set up the vertex decoder.
	if (vertTypeID != lastVType_ || !dec_) {
		dec_ = GetVertexDecoder(vertTypeID);
		lastVType_ = vertTypeID;
	}

	*bytesRead = vertexCount * dec_->VertexSize();
}

// vertTypeID is the vertex type but with the UVGen mode smashed into the top bits.
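// SubmitPrim only records the draw into the deferred drawVerts_/drawInds_ arrays; the actual
// vertex and index decoding happens later, in DecodeVerts()/DecodeInds(), normally when the batch is flushed.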
bool DrawEngineCommon::SubmitPrim(const void *verts, const void *inds, GEPrimitiveType prim, int vertexCount, u32 vertTypeID, bool clockwise, int *bytesRead) {
	if (!indexGen.PrimCompatible(prevPrim_, prim) || numDrawVerts_ >= MAX_DEFERRED_DRAW_VERTS || numDrawInds_ >= MAX_DEFERRED_DRAW_INDS || vertexCountInDrawCalls_ + vertexCount > VERTEX_BUFFER_MAX) {
		DispatchFlush();
	}
	_dbg_assert_(numDrawVerts_ < MAX_DEFERRED_DRAW_VERTS);
	_dbg_assert_(numDrawInds_ < MAX_DEFERRED_DRAW_INDS);

	// This isn't exactly right if we flushed, since prims can straddle previous calls.
	// But it generally works for common usage.
	if (prim == GE_PRIM_KEEP_PREVIOUS) {
		// Has to be set to something, let's assume POINTS (0) if no previous.
		if (prevPrim_ == GE_PRIM_INVALID)
			prevPrim_ = GE_PRIM_POINTS;
		prim = prevPrim_;
	} else {
		prevPrim_ = prim;
	}

	// If vtype has changed, set up the vertex decoder. Don't need to nullcheck dec_ since we set lastVType_ to an invalid value whenever we null it.
	if (vertTypeID != lastVType_) {
		dec_ = GetVertexDecoder(vertTypeID);
		lastVType_ = vertTypeID;
	}

	*bytesRead = vertexCount * dec_->VertexSize();

	// Check that we have enough vertices to form the requested primitive.
	if (vertexCount < 3) {
		if ((vertexCount < 2 && prim > 0) || (prim > GE_PRIM_LINE_STRIP && prim != GE_PRIM_RECTANGLES)) {
			return false;
		}
		if (vertexCount <= 0) {
			// Unfortunately we need to do this check somewhere since GetIndexBounds doesn't handle zero-length arrays.
			return false;
		}
	}

	bool applySkin = (vertTypeID & GE_VTYPE_WEIGHT_MASK) && decOptions_.applySkinInDecode;

	DeferredInds &di = drawInds_[numDrawInds_++];
	di.inds = inds;
	int indexType = (vertTypeID & GE_VTYPE_IDX_MASK) >> GE_VTYPE_IDX_SHIFT;
	if (indexType) {
		anyCCWOrIndexed_ = true;
	}
	di.indexType = indexType;
	di.prim = prim;
	di.clockwise = clockwise;
	if (!clockwise) {
		anyCCWOrIndexed_ = true;
	}
	di.vertexCount = vertexCount;
	di.vertDecodeIndex = numDrawVerts_;
	di.offset = 0;

	_dbg_assert_(numDrawVerts_ <= MAX_DEFERRED_DRAW_VERTS);
	_dbg_assert_(numDrawInds_ <= MAX_DEFERRED_DRAW_INDS);

	if (inds && numDrawVerts_ > decodeVertsCounter_ && drawVerts_[numDrawVerts_ - 1].verts == verts && !applySkin) {
		// Same vertex pointer as a previous un-decoded draw call - let's just extend the decode!
		di.vertDecodeIndex = numDrawVerts_ - 1;
		u16 lb;
		u16 ub;
		GetIndexBounds(inds, vertexCount, vertTypeID, &lb, &ub);
		DeferredVerts &dv = drawVerts_[numDrawVerts_ - 1];
		if (lb < dv.indexLowerBound)
			dv.indexLowerBound = lb;
		if (ub > dv.indexUpperBound)
			dv.indexUpperBound = ub;
	} else {
		// Record a new draw, and a new index gen.
		DeferredVerts &dv = drawVerts_[numDrawVerts_++];
		dv.verts = verts;
		dv.vertexCount = vertexCount;
		dv.uvScale = gstate_c.uv;
		// Does handle the unindexed case.
		GetIndexBounds(inds, vertexCount, vertTypeID, &dv.indexLowerBound, &dv.indexUpperBound);
	}

	vertexCountInDrawCalls_ += vertexCount;
	seenPrims_ |= (1 << prim);

	if (prim == GE_PRIM_RECTANGLES && (gstate.getTextureAddress(0) & 0x3FFFFFFF) == (gstate.getFrameBufAddress() & 0x3FFFFFFF)) {
		// This prevents issues with consecutive self-renders in Ridge Racer.
		gstate_c.Dirty(DIRTY_TEXTURE_PARAMS);
		DispatchFlush();
	}
	return true;
}
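
// DecodeVerts/DecodeInds materialize the deferred draws recorded by SubmitPrim.
// drawVertexOffsets_[i] records where draw i's vertices end up in the shared decode buffer,
// so DecodeInds can rebase that draw's indices by the same amount.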
void DrawEngineCommon::DecodeVerts(u8 *dest) {
	// Note that this should be able to continue a partial decode - we don't necessarily start from zero here (although we do most of the time).
	int i = decodeVertsCounter_;
	int stride = (int)dec_->GetDecVtxFmt().stride;
	for (; i < numDrawVerts_; i++) {
		DeferredVerts &dv = drawVerts_[i];

		int indexLowerBound = dv.indexLowerBound;
		drawVertexOffsets_[i] = numDecodedVerts_ - indexLowerBound;

		int indexUpperBound = dv.indexUpperBound;

		if (indexUpperBound + 1 - indexLowerBound + numDecodedVerts_ >= VERTEX_BUFFER_MAX) {
			// Hit our limit! Stop decoding in this draw.
			break;
		}

		// Decode the verts (and at the same time apply morphing/skinning). Simple.
		dec_->DecodeVerts(dest + numDecodedVerts_ * stride, dv.verts, &dv.uvScale, indexLowerBound, indexUpperBound);
		numDecodedVerts_ += indexUpperBound - indexLowerBound + 1;
	}
	decodeVertsCounter_ = i;
}

int DrawEngineCommon::DecodeInds() {
	// Note that this should be able to continue a partial decode - we don't necessarily start from zero here (although we do most of the time).

	int i = decodeIndsCounter_;
	for (; i < numDrawInds_; i++) {
		const DeferredInds &di = drawInds_[i];

		int indexOffset = drawVertexOffsets_[di.vertDecodeIndex] + di.offset;
		bool clockwise = di.clockwise;
		// We've already collapsed subsequent draws with the same vertex pointer, so no tricky logic here anymore.
		// Loop through the drawcalls, translating indices as we go.
		switch (di.indexType) {
		case GE_VTYPE_IDX_NONE >> GE_VTYPE_IDX_SHIFT:
			indexGen.AddPrim(di.prim, di.vertexCount, indexOffset, clockwise);
			break;
		case GE_VTYPE_IDX_8BIT >> GE_VTYPE_IDX_SHIFT:
			indexGen.TranslatePrim(di.prim, di.vertexCount, (const u8 *)di.inds, indexOffset, clockwise);
			break;
		case GE_VTYPE_IDX_16BIT >> GE_VTYPE_IDX_SHIFT:
			indexGen.TranslatePrim(di.prim, di.vertexCount, (const u16_le *)di.inds, indexOffset, clockwise);
			break;
		case GE_VTYPE_IDX_32BIT >> GE_VTYPE_IDX_SHIFT:
			indexGen.TranslatePrim(di.prim, di.vertexCount, (const u32_le *)di.inds, indexOffset, clockwise);
			break;
		}
	}
	decodeIndsCounter_ = i;

	return indexGen.VertexCount();
}

bool DrawEngineCommon::CanUseHardwareTransform(int prim) const {
	if (!useHWTransform_)
		return false;
	return !gstate.isModeThrough() && prim != GE_PRIM_RECTANGLES && prim > GE_PRIM_LINE_STRIP;
}

bool DrawEngineCommon::CanUseHardwareTessellation(GEPatchPrimType prim) const {
	if (useHWTessellation_) {
		return CanUseHardwareTransform(PatchPrimToPrim(prim));
	}
	return false;
}
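
// Gathers the spline/bezier control points into the separate position/texcoord/color streams
// (each with its own stride) consumed by the hardware tessellation path.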
void TessellationDataTransfer::CopyControlPoints(float *pos, float *tex, float *col, int posStride, int texStride, int colStride, const SimpleVertex *const *points, int size, u32 vertType) {
	bool hasColor = (vertType & GE_VTYPE_COL_MASK) != 0;
	bool hasTexCoord = (vertType & GE_VTYPE_TC_MASK) != 0;

	for (int i = 0; i < size; ++i) {
		memcpy(pos, points[i]->pos.AsArray(), 3 * sizeof(float));
		pos += posStride;
	}
	if (hasTexCoord) {
		for (int i = 0; i < size; ++i) {
			memcpy(tex, points[i]->uv, 2 * sizeof(float));
			tex += texStride;
		}
	}
	if (hasColor) {
		for (int i = 0; i < size; ++i) {
			memcpy(col, Vec4f::FromRGBA(points[i]->color_32).AsArray(), 4 * sizeof(float));
			col += colStride;
		}
	}
}

bool DrawEngineCommon::DescribeCodePtr(const u8 *ptr, std::string &name) const {
	if (!decJitCache_ || !decJitCache_->IsInSpace(ptr)) {
		return false;
	}

	// Loop through all the decoders and see if we have a match.
	VertexDecoder *found = nullptr;
	u32 foundKey;

	decoderMap_.Iterate([&](u32 key, VertexDecoder *value) {
		if (!found) {
			if (value->IsInSpace(ptr)) {
				foundKey = key;
				found = value;
			}
		}
	});

	if (found) {
		char temp[256];
		found->ToString(temp, false);
		name = temp;
		snprintf(temp, sizeof(temp), "_%08X", foundKey);
		name += temp;
		return true;
	} else {
		return false;
	}
}