GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/ARM/ArmCompVFPUNEON.cpp
// Copyright (c) 2013- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

// NEON VFPU
// This is where we will create an alternate implementation of the VFPU emulation
// that uses NEON Q registers to cache pairs/tris/quads, and so on.
// Will require major extensions to the reg cache and other things.

// ARM NEON can only do pairs and quads, not tris and scalars.
// We can do scalars, though, for many operations if all the operands
// are below Q8 (D16, S32) using regular VFP instructions but really not sure
// if it's worth it.
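// (Context for the note above: the 32 single-precision S registers only alias the low
// half of the NEON register file - S0-S31 overlap D0-D15, i.e. Q0-Q7 - so a value cached
// in Q8 or above has no S-register view and would first have to be moved down before a
// plain VFP instruction could touch it.)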

#include "ppsspp_config.h"
#if PPSSPP_ARCH(ARM)

#include <cmath>

#include "Common/Data/Convert/SmallDataConvert.h"
#include "Common/Math/math_util.h"

#include "Common/CPUDetect.h"
#include "Core/MemMap.h"
#include "Core/MIPS/MIPS.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/MIPSVFPUUtils.h"
#include "Core/Config.h"
#include "Core/Reporting.h"

#include "Core/MIPS/ARM/ArmJit.h"
#include "Core/MIPS/ARM/ArmRegCache.h"
#include "Core/MIPS/ARM/ArmRegCacheFPU.h"
#include "Core/MIPS/ARM/ArmCompVFPUNEONUtil.h"

// TODO: Somehow #ifdef away on ARMv5eabi, without breaking the linker.

// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non working ones should have DISABLE.

// #define CONDITIONAL_DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
#define DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define DISABLE_UNKNOWN_PREFIX { WARN_LOG(Log::JIT, "DISABLE: Unknown Prefix in %s", __FUNCTION__); fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }

#define _RS MIPS_GET_RS(op)
#define _RT MIPS_GET_RT(op)
#define _RD MIPS_GET_RD(op)
#define _FS MIPS_GET_FS(op)
#define _FT MIPS_GET_FT(op)
#define _FD MIPS_GET_FD(op)
#define _SA MIPS_GET_SA(op)
#define _POS ((op>> 6) & 0x1F)
#define _SIZE ((op>>11) & 0x1F)
#define _IMM16 (signed short)(op & 0xFFFF)
#define _IMM26 (op & 0x03FFFFFF)


namespace MIPSComp {

using namespace ArmGen;
using namespace ArmJitConstants;

static const float minus_one = -1.0f;
static const float one = 1.0f;
static const float zero = 0.0f;

void ArmJit::CompNEON_VecDo3(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	MappedRegs r = NEONMapDirtyInIn(op, sz, sz, sz);
	ARMReg temp = MatchSize(Q0, r.vs);
	// TODO: Special case for scalar
	switch (op >> 26) {
	case 24: //VFPU0
		switch ((op >> 23) & 7) {
		case 0: VADD(F_32, r.vd, r.vs, r.vt); break; // vadd
		case 1: VSUB(F_32, r.vd, r.vs, r.vt); break; // vsub
		case 7: // vdiv // vdiv THERE IS NO NEON SIMD VDIV :( There's a fast reciprocal iterator thing though.
			{
				// Implement by falling back to VFP
				VMOV(D0, D_0(r.vs));
				VMOV(D1, D_0(r.vt));
				VDIV(S0, S0, S2);
				if (sz >= V_Pair)
					VDIV(S1, S1, S3);
				VMOV(D_0(r.vd), D0);
				if (sz >= V_Triple) {
					VMOV(D0, D_1(r.vs));
					VMOV(D1, D_1(r.vt));
					VDIV(S0, S0, S2);
					if (sz == V_Quad)
						VDIV(S1, S1, S3);
					VMOV(D_1(r.vd), D0);
				}
			}
			break;
		default:
			DISABLE;
		}
		break;
	case 25: //VFPU1
		switch ((op >> 23) & 7) {
		case 0: VMUL(F_32, r.vd, r.vs, r.vt); break; // vmul
		default:
			DISABLE;
		}
		break;
	case 27: //VFPU3
		switch ((op >> 23) & 7) {
		case 2: VMIN(F_32, r.vd, r.vs, r.vt); break; // vmin
		case 3: VMAX(F_32, r.vd, r.vs, r.vt); break; // vmax
		case 6: // vsge
			VMOV_immf(temp, 1.0f);
			VCGE(F_32, r.vd, r.vs, r.vt);
			VAND(r.vd, r.vd, temp);
			break;
		case 7: // vslt
			VMOV_immf(temp, 1.0f);
			VCLT(F_32, r.vd, r.vs, r.vt);
			VAND(r.vd, r.vd, temp);
			break;
		}
		break;

	default:
		DISABLE;
	}

	NEONApplyPrefixD(r.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

// #define CONDITIONAL_DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }

void ArmJit::CompNEON_SV(MIPSOpcode op) {
	CONDITIONAL_DISABLE(LSU_VFPU);
	CheckMemoryBreakpoint();

	// Remember to use single lane stores here and not VLDR/VSTR - switching usage
	// between NEON and VFPU can be expensive on some chips.

	// Here's a common idiom we should optimize:
	// lv.s S200, 0(s4)
	// lv.s S201, 4(s4)
	// lv.s S202, 8(s4)
	// vone.s S203
	// vtfm4.q C000, E600, C200
	// Would be great if we could somehow combine the lv.s into one vector instead of mapping three
	// separate quads.

	s32 offset = (signed short)(op & 0xFFFC);
	int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
	MIPSGPReg rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 50: //lv.s // VI(vt) = Memory::Read_U32(addr);
		{
			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
				INFO_LOG(Log::HLE, "LV.S fastmode!");
				// TODO: Also look forward and combine multiple loads.
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Single, MAP_NOINIT | MAP_DIRTY);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VLD1_lane(F_32, ar, R0, 0, true);
				} else {
					VLD1_lane(F_32, ar, gpr.RPtr(rs), 0, true);
				}
				break;
			}
			INFO_LOG(Log::HLE, "LV.S slowmode!");

			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Single, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VLD1_lane(F_32, ar, R0, 0, true);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;

	case 58: //sv.s // Memory::Write_U32(VI(vt), addr);
		{
			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
				INFO_LOG(Log::HLE, "SV.S fastmode!");
				// TODO: Also look forward and combine multiple stores.
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Single, 0);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VST1_lane(F_32, ar, R0, 0, true);
				} else {
					VST1_lane(F_32, ar, gpr.RPtr(rs), 0, true);
				}
				break;
			}

			INFO_LOG(Log::HLE, "SV.S slowmode!");
			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Single, 0);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VST1_lane(F_32, ar, R0, 0, true);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

inline int MIPS_GET_VQVT(u32 op) {
	return (((op >> 16) & 0x1f)) | ((op & 1) << 5);
}

void ArmJit::CompNEON_SVQ(MIPSOpcode op) {
	CONDITIONAL_DISABLE(LSU_VFPU);
	CheckMemoryBreakpoint();

	int offset = (signed short)(op & 0xFFFC);
	int vt = MIPS_GET_VQVT(op.encoding);
	MIPSGPReg rs = _RS;
	bool doCheck = false;
	switch (op >> 26)
	{
	case 54: //lv.q
		{
			// Check for four-in-a-row
			const u32 ops[4] = {
				op.encoding,
				GetOffsetInstruction(1).encoding,
				GetOffsetInstruction(2).encoding,
				GetOffsetInstruction(3).encoding,
			};
			if (g_Config.bFastMemory && (ops[1] >> 26) == 54 && (ops[2] >> 26) == 54 && (ops[3] >> 26) == 54) {
				int offsets[4] = {offset, (s16)(ops[1] & 0xFFFC), (s16)(ops[2] & 0xFFFC), (s16)(ops[3] & 0xFFFC)};
				int rss[4] = {MIPS_GET_RS(op), MIPS_GET_RS(ops[1]), MIPS_GET_RS(ops[2]), MIPS_GET_RS(ops[3])};
				if (offsets[1] == offset + 16 && offsets[2] == offsets[1] + 16 && offsets[3] == offsets[2] + 16 &&
					rss[0] == rss[1] && rss[1] == rss[2] && rss[2] == rss[3]) {
					int vts[4] = {MIPS_GET_VQVT(op.encoding), MIPS_GET_VQVT(ops[1]), MIPS_GET_VQVT(ops[2]), MIPS_GET_VQVT(ops[3])};
					// TODO: Also check the destination registers!
					// Detected four consecutive ones!
					// gpr.MapRegAsPointer(rs);
					// fpr.QLoad4x4(vts[4], rs, offset);
					INFO_LOG(Log::JIT, "Matrix load detected! TODO: optimize");
					// break;
				}
			}

			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && offset < 0x400-16 && offset > -0x400-16) {
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Quad, MAP_DIRTY | MAP_NOINIT);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VLD1(F_32, ar, R0, 2, ALIGN_128);
				} else {
					VLD1(F_32, ar, gpr.RPtr(rs), 2, ALIGN_128);
				}
				break;
			}

			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Quad, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}

			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}

			VLD1(F_32, ar, R0, 2, ALIGN_128);

			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;

	case 62: //sv.q
		{
			const u32 ops[4] = {
				op.encoding,
				GetOffsetInstruction(1).encoding,
				GetOffsetInstruction(2).encoding,
				GetOffsetInstruction(3).encoding,
			};
			if (g_Config.bFastMemory && (ops[1] >> 26) == 62 && (ops[2] >> 26) == 62 && (ops[3] >> 26) == 62) {
				int offsets[4] = { offset, (s16)(ops[1] & 0xFFFC), (s16)(ops[2] & 0xFFFC), (s16)(ops[3] & 0xFFFC) };
				int rss[4] = { MIPS_GET_RS(op), MIPS_GET_RS(ops[1]), MIPS_GET_RS(ops[2]), MIPS_GET_RS(ops[3]) };
				if (offsets[1] == offset + 16 && offsets[2] == offsets[1] + 16 && offsets[3] == offsets[2] + 16 &&
					rss[0] == rss[1] && rss[1] == rss[2] && rss[2] == rss[3]) {
					int vts[4] = { MIPS_GET_VQVT(op.encoding), MIPS_GET_VQVT(ops[1]), MIPS_GET_VQVT(ops[2]), MIPS_GET_VQVT(ops[3]) };
					// TODO: Also check the destination registers!
					// Detected four consecutive ones!
					// gpr.MapRegAsPointer(rs);
					// fpr.QLoad4x4(vts[4], rs, offset);
					INFO_LOG(Log::JIT, "Matrix store detected! TODO: optimize");
					// break;
				}
			}

			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && offset < 0x400-16 && offset > -0x400-16) {
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Quad, 0);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VST1(F_32, ar, R0, 2, ALIGN_128);
				} else {
					VST1(F_32, ar, gpr.RPtr(rs), 2, ALIGN_128);
				}
				break;
			}

			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Quad, 0);

			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}

			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}

			VST1(F_32, ar, R0, 2, ALIGN_128);

			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;

	default:
		DISABLE;
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VVectorInit(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	// WARNING: No prefix support!
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}
	VectorSize sz = GetVecSize(op);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_NOINIT | MAP_DIRTY);

	switch ((op >> 16) & 0xF) {
	case 6: // vzero
		VEOR(vd.rd, vd.rd, vd.rd);
		break;
	case 7: // vone
		VMOV_immf(vd.rd, 1.0f);
		break;
	default:
		DISABLE;
		break;
	}
	NEONApplyPrefixD(vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VDot(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	MappedRegs r = NEONMapDirtyInIn(op, V_Single, sz, sz);

	switch (sz) {
	case V_Pair:
		VMUL(F_32, r.vd, r.vs, r.vt);
		VPADD(F_32, r.vd, r.vd, r.vd);
		break;
	case V_Triple:
		VMUL(F_32, Q0, r.vs, r.vt);
		VPADD(F_32, D0, D0, D0);
		VADD(F_32, r.vd, D0, D1);
		break;
	case V_Quad:
		VMUL(F_32, D0, D_0(r.vs), D_0(r.vt));
		VMLA(F_32, D0, D_1(r.vs), D_1(r.vt));
		VPADD(F_32, r.vd, D0, D0);
		break;
	case V_Single:
	case V_Invalid:
		;
	}

	NEONApplyPrefixD(r.vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}


void ArmJit::CompNEON_VHdp(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	DISABLE;

	// Similar to VDot but the last component is only s instead of s * t.
	// A bit tricky on NEON...
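	// One possible approach (untested sketch): copy t into a temp, force its last lane to
	// 1.0f (e.g. MOVP2R(R0, &one); VLD1_lane(F_32, temp, R0, n - 1, true);) so the multiply
	// passes s's last component through unchanged, then reuse the same VMUL/VPADD
	// reduction that CompNEON_VDot uses above.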
}

void ArmJit::CompNEON_VScl(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	MappedRegs r = NEONMapDirtyInIn(op, sz, sz, V_Single);

	ARMReg temp = MatchSize(Q0, r.vt);

	// TODO: VMUL_scalar directly when possible
	VMOV_neon(temp, r.vt);
	VMUL_scalar(F_32, r.vd, r.vs, DScalar(Q0, 0));

	NEONApplyPrefixD(r.vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VV2Op(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	// Pre-processing: Eliminate silly no-op VMOVs, common in Wipeout Pure
	if (((op >> 16) & 0x1f) == 0 && _VS == _VD && js.HasNoPrefix()) {
		return;
	}

	// Must bail before we start mapping registers.
	switch ((op >> 16) & 0x1f) {
	case 0: // d[i] = s[i]; break; //vmov
	case 1: // d[i] = fabsf(s[i]); break; //vabs
	case 2: // d[i] = -s[i]; break; //vneg
	case 17: // d[i] = 1.0f / sqrtf(s[i]); break; //vrsq
		break;

	default:
		DISABLE;
		break;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	MappedRegs r = NEONMapDirtyIn(op, sz, sz);

	ARMReg temp = MatchSize(Q0, r.vs);

	switch ((op >> 16) & 0x1f) {
	case 0: // d[i] = s[i]; break; //vmov
		// Probably for swizzle.
		VMOV_neon(r.vd, r.vs);
		break;
	case 1: // d[i] = fabsf(s[i]); break; //vabs
		VABS(F_32, r.vd, r.vs);
		break;
	case 2: // d[i] = -s[i]; break; //vneg
		VNEG(F_32, r.vd, r.vs);
		break;

	case 4: // if (s[i] < 0) d[i] = 0; else {if(s[i] > 1.0f) d[i] = 1.0f; else d[i] = s[i];} break; // vsat0
		if (IsD(r.vd)) {
			VMOV_immf(D0, 0.0f);
			VMOV_immf(D1, 1.0f);
			VMAX(F_32, r.vd, r.vs, D0);
			VMIN(F_32, r.vd, r.vd, D1);
		} else {
			VMOV_immf(Q0, 1.0f);
			VMIN(F_32, r.vd, r.vs, Q0);
			VMOV_immf(Q0, 0.0f);
			VMAX(F_32, r.vd, r.vd, Q0);
		}
		break;
	case 5: // if (s[i] < -1.0f) d[i] = -1.0f; else {if(s[i] > 1.0f) d[i] = 1.0f; else d[i] = s[i];} break; // vsat1
		if (IsD(r.vd)) {
			VMOV_immf(D0, -1.0f);
			VMOV_immf(D1, 1.0f);
			VMAX(F_32, r.vd, r.vs, D0);
			VMIN(F_32, r.vd, r.vd, D1);
		} else {
			VMOV_immf(Q0, 1.0f);
			VMIN(F_32, r.vd, r.vs, Q0);
			VMOV_immf(Q0, -1.0f);
			VMAX(F_32, r.vd, r.vd, Q0);
		}
		break;

	case 16: // d[i] = 1.0f / s[i]; break; //vrcp
		// Can just fallback to VFP and use VDIV.
		DISABLE;
		{
			ARMReg temp2 = fpr.QAllocTemp(sz);
			// Needs iterations on NEON. And two temps - which is a problem if vs == vd! Argh!
			VRECPE(F_32, temp, r.vs);
			VRECPS(temp2, r.vs, temp);
			VMUL(F_32, temp2, temp2, temp);
			VRECPS(temp2, r.vs, temp);
			VMUL(F_32, temp2, temp2, temp);
		}
		// http://stackoverflow.com/questions/6759897/how-to-divide-in-neon-intrinsics-by-a-float-number
		// reciprocal = vrecpeq_f32(b);
		// reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
		// reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
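		// (The math behind that sequence: VRECPS(a, x) computes 2 - a*x, so each refinement
		// step is x' = x * (2 - b*x) - the Newton-Raphson iteration for 1/b, roughly doubling
		// the number of correct bits per step.)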
		DISABLE;
		break;

	case 17: // d[i] = 1.0f / sqrtf(s[i]); break; //vrsq
		DISABLE;
		// Needs iterations on NEON
		{
			if (true) {
				// Not-very-accurate estimate
				VRSQRTE(F_32, r.vd, r.vs);
			} else {
				ARMReg temp2 = fpr.QAllocTemp(sz);
				// TODO: It's likely that some games will require one or two Newton-Raphson
				// iterations to refine the estimate.
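				// (For reference: VRSQRTS(a, x) computes (3 - a*x) / 2, and a full refinement
				// step for 1/sqrt(d) is x' = x * (3 - d*x*x) / 2 - so the value fed to VRSQRTS
				// should be d*x (or x*x against d), which costs an extra VMUL per iteration.)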
				VRSQRTE(F_32, temp, r.vs);
				VRSQRTS(temp2, r.vs, temp);
				VMUL(F_32, r.vd, temp2, temp);
				//VRSQRTS(temp2, r.vs, temp);
				// VMUL(F_32, r.vd, temp2, temp);
			}
		}
		break;
	case 18: // d[i] = sinf((float)M_PI_2 * s[i]); break; //vsin
		DISABLE;
		break;
	case 19: // d[i] = cosf((float)M_PI_2 * s[i]); break; //vcos
		DISABLE;
		break;
	case 20: // d[i] = powf(2.0f, s[i]); break; //vexp2
		DISABLE;
		break;
	case 21: // d[i] = logf(s[i])/log(2.0f); break; //vlog2
		DISABLE;
		break;
	case 22: // d[i] = sqrtf(s[i]); break; //vsqrt
		// Let's just defer to VFP for now. Better than calling the interpreter for sure.
		VMOV_neon(MatchSize(Q0, r.vs), r.vs);
		for (int i = 0; i < n; i++) {
			VSQRT((ARMReg)(S0 + i), (ARMReg)(S0 + i));
		}
		// Copy the results back out of the scratch register into the destination.
		VMOV_neon(r.vd, MatchSize(Q0, r.vd));
		break;
	case 23: // d[i] = asinf(s[i] * (float)M_2_PI); break; //vasin
		DISABLE;
		break;
	case 24: // d[i] = -1.0f / s[i]; break; // vnrcp
		// Needs iterations on NEON. Just do the same as vrcp and negate.
		DISABLE;
		break;
	case 26: // d[i] = -sinf((float)M_PI_2 * s[i]); break; // vnsin
		DISABLE;
		break;
	case 28: // d[i] = 1.0f / expf(s[i] * (float)M_LOG2E); break; // vrexp2
		DISABLE;
		break;
	default:
		DISABLE;
		break;
	}

	NEONApplyPrefixD(r.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Mftv(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	int imm = op & 0xFF;
	MIPSGPReg rt = _RT;
	switch ((op >> 21) & 0x1f) {
	case 3: //mfv / mfvc
		// rt = 0, imm = 255 appears to be used as a CPU interlock by some games.
		if (rt != 0) {
			if (imm < 128) { //R(rt) = VI(imm);
				ARMReg r = fpr.QMapReg(imm, V_Single, MAP_READ);
				gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
				// TODO: Gotta be a faster way
				VMOV_neon(MatchSize(Q0, r), r);
				VMOV(gpr.R(rt), S0);
			} else if (imm < 128 + VFPU_CTRL_MAX) { //mfvc
				// In case we have a saved prefix.
				FlushPrefixV();
				if (imm - 128 == VFPU_CTRL_CC) {
					gpr.MapDirtyIn(rt, MIPS_REG_VFPUCC);
					MOV(gpr.R(rt), gpr.R(MIPS_REG_VFPUCC));
				} else {
					gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
					LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
				}
			} else {
				//ERROR - maybe need to make this value too an "interlock" value?
				ERROR_LOG(Log::CPU, "mfv - invalid register %i", imm);
			}
		}
		break;

	case 7: // mtv
		if (imm < 128) {
			// TODO: It's pretty common that this is preceded by mfc1, that is, a value is being
			// moved from the regular floating point registers. It would probably be faster to do
			// the copy directly in the FPRs instead of going through the GPRs.

			ARMReg r = fpr.QMapReg(imm, V_Single, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsMapped(rt)) {
				VMOV(S0, gpr.R(rt));
				VMOV_neon(r, MatchSize(Q0, r));
			} else {
				ADDI2R(R0, CTXREG, gpr.GetMipsRegOffset(rt), R1);
				VLD1_lane(F_32, r, R0, 0, true);
			}
		} else if (imm < 128 + VFPU_CTRL_MAX) { //mtvc //currentMIPS->vfpuCtrl[imm - 128] = R(rt);
			if (imm - 128 == VFPU_CTRL_CC) {
				gpr.MapDirtyIn(MIPS_REG_VFPUCC, rt);
				MOV(gpr.R(MIPS_REG_VFPUCC), gpr.R(rt));
			} else {
				gpr.MapReg(rt);
				STR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
			}

			// TODO: Optimization if rt is Imm?
			// Set these BEFORE disable!
			if (imm - 128 == VFPU_CTRL_SPREFIX) {
				js.prefixSFlag = JitState::PREFIX_UNKNOWN;
				js.blockWrotePrefixes = true;
			} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
				js.prefixTFlag = JitState::PREFIX_UNKNOWN;
				js.blockWrotePrefixes = true;
			} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
				js.prefixDFlag = JitState::PREFIX_UNKNOWN;
				js.blockWrotePrefixes = true;
			}
		} else {
			//ERROR
			_dbg_assert_msg_(false,"mtv - invalid register");
		}
		break;

	default:
		DISABLE;
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmfvc(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vmtvc(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);

	int vs = _VS;
	int imm = op & 0xFF;
	if (imm >= 128 && imm < 128 + VFPU_CTRL_MAX) {
		ARMReg r = fpr.QMapReg(vs, V_Single, 0);
		ADDI2R(R0, CTXREG, offsetof(MIPSState, vfpuCtrl[0]) + (imm - 128) * 4, R1);
		VST1_lane(F_32, r, R0, 0, true);
		fpr.ReleaseSpillLocksAndDiscardTemps();

		if (imm - 128 == VFPU_CTRL_SPREFIX) {
			js.prefixSFlag = JitState::PREFIX_UNKNOWN;
			js.blockWrotePrefixes = true;
		} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
			js.prefixTFlag = JitState::PREFIX_UNKNOWN;
			js.blockWrotePrefixes = true;
		} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
			js.prefixDFlag = JitState::PREFIX_UNKNOWN;
			js.blockWrotePrefixes = true;
		}
	}
}

void ArmJit::CompNEON_VMatrixInit(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);

	MatrixSize msz = GetMtxSize(op);
	int n = GetMatrixSide(msz);

	ARMReg cols[4];
	fpr.QMapMatrix(cols, _VD, msz, MAP_NOINIT | MAP_DIRTY);

	switch ((op >> 16) & 0xF) {
	case 3: // vmidt
		// There has to be a better way to synthesize: 1.0, 0.0, 0.0, 1.0 in a quad
		VEOR(D0, D0, D0);
		VMOV_immf(D1, 1.0f);
		VTRN(F_32, D0, D1);
		VREV64(I_32, D0, D0);
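		// After the four instructions above, the registers should hold (lane 0, lane 1):
		//   VEOR:      D0 = { 0.0, 0.0 }
		//   VMOV_immf: D1 = { 1.0, 1.0 }
		//   VTRN.32:   D0 = { 0.0, 1.0 }, D1 = { 0.0, 1.0 }
		//   VREV64.32: D0 = { 1.0, 0.0 }
		// i.e. D0 and D1 are the two columns of a 2x2 identity, reused for the larger sizes below.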
		switch (msz) {
		case M_2x2:
			VMOV_neon(cols[0], D0);
			VMOV_neon(cols[1], D1);
			break;
		case M_3x3:
			VMOV_neon(D_0(cols[0]), D0);
			VMOV_imm(I_8, D_1(cols[0]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_0(cols[1]), D1);
			VMOV_imm(I_8, D_1(cols[1]), VIMMxxxxxxxx, 0);
			VMOV_imm(I_8, D_0(cols[2]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_1(cols[2]), D0);
			break;
		case M_4x4:
			VMOV_neon(D_0(cols[0]), D0);
			VMOV_imm(I_8, D_1(cols[0]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_0(cols[1]), D1);
			VMOV_imm(I_8, D_1(cols[1]), VIMMxxxxxxxx, 0);
			VMOV_imm(I_8, D_0(cols[2]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_1(cols[2]), D0);
			VMOV_imm(I_8, D_0(cols[3]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_1(cols[3]), D1);

			// NEONTranspose4x4(cols);
			break;
		default:
			_assert_msg_(false, "Bad matrix size");
			break;
		}
		break;
	case 6: // vmzero
		for (int i = 0; i < n; i++) {
			VEOR(cols[i], cols[i], cols[i]);
		}
		break;
	case 7: // vmone
		for (int i = 0; i < n; i++) {
			VMOV_immf(cols[i], 1.0f);
		}
		break;
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmmov(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMMOV);
	if (_VS == _VD) {
		// A lot of these no-op matrix moves in Wipeout... Just drop the instruction entirely.
		return;
	}

	MatrixSize msz = GetMtxSize(op);

	MatrixOverlapType overlap = GetMatrixOverlap(_VD, _VS, msz);
	if (overlap != OVERLAP_NONE) {
		// Too complicated to bother handling in the JIT.
		// TODO: Special case for in-place (and other) transpose, etc.
		DISABLE;
	}

	ARMReg s_cols[4], d_cols[4];
	fpr.QMapMatrix(s_cols, _VS, msz, 0);
	fpr.QMapMatrix(d_cols, _VD, msz, MAP_DIRTY | MAP_NOINIT);

	int n = GetMatrixSide(msz);
	for (int i = 0; i < n; i++) {
		VMOV_neon(d_cols[i], s_cols[i]);
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmmul(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMMUL);

	MatrixSize msz = GetMtxSize(op);
	int n = GetMatrixSide(msz);

	bool overlap = GetMatrixOverlap(_VD, _VS, msz) || GetMatrixOverlap(_VD, _VT, msz);
	if (overlap) {
		// Later. Fortunately, the VFPU also seems to prohibit overlap for matrix mul.
		INFO_LOG(Log::JIT, "Matrix overlap, ignoring.");
		DISABLE;
	}

	// Having problems with 2x2s for some reason.
	if (msz == M_2x2) {
		DISABLE;
	}

	ARMReg s_cols[4], t_cols[4], d_cols[4];

	// For some reason, vmmul is encoded with the first matrix (S) transposed from the real meaning.
	fpr.QMapMatrix(t_cols, _VT, msz, MAP_FORCE_LOW); // Need to see if we can avoid having to force it low in some sane way. Will need crazy prediction logic for loads otherwise.
	fpr.QMapMatrix(s_cols, Xpose(_VS), msz, MAP_PREFER_HIGH);
	fpr.QMapMatrix(d_cols, _VD, msz, MAP_PREFER_HIGH | MAP_NOINIT | MAP_DIRTY);

	// TODO: Getting there but still getting wrong results.
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			if (i == 0) {
				VMUL_scalar(F_32, d_cols[j], s_cols[i], XScalar(t_cols[j], i));
			} else {
				VMLA_scalar(F_32, d_cols[j], s_cols[i], XScalar(t_cols[j], i));
			}
		}
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmscl(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMSCL);

	MatrixSize msz = GetMtxSize(op);

	bool overlap = GetMatrixOverlap(_VD, _VS, msz) != OVERLAP_NONE;
	if (overlap) {
		DISABLE;
	}

	int n = GetMatrixSide(msz);

	ARMReg s_cols[4], t, d_cols[4];
	fpr.QMapMatrix(s_cols, _VS, msz, 0);
	fpr.QMapMatrix(d_cols, _VD, msz, MAP_NOINIT | MAP_DIRTY);

	t = fpr.QMapReg(_VT, V_Single, 0);
	VMOV_neon(D0, t);
	for (int i = 0; i < n; i++) {
		VMUL_scalar(F_32, d_cols[i], s_cols[i], DScalar(D0, 0));
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vtfm(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VTFM);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	if (_VT == _VD) {
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);
	MatrixSize msz = GetMtxSize(op);
	int n = GetNumVectorElements(sz);
	int ins = (op >> 23) & 7;

	bool homogenous = false;
	if (n == ins) {
		n++;
		sz = (VectorSize)((int)(sz)+1);
		msz = (MatrixSize)((int)(msz)+1);
		homogenous = true;
	}
	// Otherwise, n should already be ins + 1.
	else if (n != ins + 1) {
		DISABLE;
	}

	ARMReg s_cols[4], t, d;
	t = fpr.QMapReg(_VT, sz, MAP_FORCE_LOW);
	fpr.QMapMatrix(s_cols, Xpose(_VS), msz, MAP_PREFER_HIGH);
	d = fpr.QMapReg(_VD, sz, MAP_DIRTY | MAP_NOINIT | MAP_PREFER_HIGH);

	VMUL_scalar(F_32, d, s_cols[0], XScalar(t, 0));
	for (int i = 1; i < n; i++) {
		if (homogenous && i == n - 1) {
			VADD(F_32, d, d, s_cols[i]);
		} else {
			VMLA_scalar(F_32, d, s_cols[i], XScalar(t, i));
		}
	}

	// VTFM does not have prefix support.

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VCrs(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_VDet(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vi2x(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vx2i(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vf2i(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vi2f(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	int imm = (op >> 16) & 0x1f;
	const float mult = 1.0f / (float)(1UL << imm);

	MappedRegs regs = NEONMapDirtyIn(op, sz, sz);

	MOVI2F_neon(MatchSize(Q0, regs.vd), mult, R0);

	VCVT(F_32, regs.vd, regs.vs);
	VMUL(F_32, regs.vd, regs.vd, Q0);

	NEONApplyPrefixD(regs.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vh2f(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (!cpu_info.bHalf) {
		// No hardware support for half-to-float, fallback to interpreter
		// TODO: Translate the fast SSE solution to standard integer/VFP stuff
		// for the weaker CPUs.
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);

	VectorSize outsize = V_Pair;
	switch (sz) {
	case V_Single:
		outsize = V_Pair;
		break;
	case V_Pair:
		outsize = V_Quad;
		break;
	default:
		ERROR_LOG(Log::JIT, "Vh2f: Must be pair or quad");
		break;
	}

	ARMReg vs = NEONMapPrefixS(_VS, sz, 0);
	// TODO: MAP_NOINIT if they're definitely not overlapping.
	DestARMReg vd = NEONMapPrefixD(_VD, outsize, MAP_DIRTY);

	VCVTF32F16(vd.rd, vs);

	NEONApplyPrefixD(vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vcst(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	int conNum = (op >> 16) & 0x1f;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_DIRTY | MAP_NOINIT);
	gpr.SetRegImm(R0, (u32)(void *)&cst_constants[conNum]);
	VLD1_all_lanes(F_32, vd, R0, true);
	NEONApplyPrefixD(vd); // TODO: Could bake this into the constant we load.

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vhoriz(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}
	VectorSize sz = GetVecSize(op);
	// Do any games use these a noticeable amount?
	switch ((op >> 16) & 31) {
	case 6: // vfad
		{
			VMOV_neon(F_32, D1, 0.0f);
			MappedRegs r = NEONMapDirtyIn(op, V_Single, sz);
			switch (sz) {
			case V_Pair:
				VPADD(F_32, r.vd, r.vs, r.vs);
				break;
			case V_Triple:
				VPADD(F_32, D0, D_0(r.vs), D_0(r.vs));
				VADD(F_32, r.vd, D0, D_1(r.vs));
				break;
			case V_Quad:
				VADD(F_32, D0, D_0(r.vs), D_1(r.vs));
				VPADD(F_32, r.vd, D0, D0);
				break;
			default:
				;
			}
			// This forces the sign of -0.000 to +0.000.
			VADD(F_32, r.vd, r.vd, D1);
			break;
		}

	case 7: // vavg
		DISABLE;
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VRot(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);

	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	DISABLE;

	int vd = _VD;
	int vs = _VS;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	// ...
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VIdt(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_NOINIT | MAP_DIRTY);
	switch (sz) {
	case V_Pair:
		VMOV_immf(vd, 1.0f);
		if ((_VD & 1) == 0) {
			// Load with 1.0, 0.0
			VMOV_imm(I_64, D0, VIMMbits2bytes, 0x0F);
			VAND(vd, vd, D0);
		} else {
			VMOV_imm(I_64, D0, VIMMbits2bytes, 0xF0);
			VAND(vd, vd, D0);
		}
		break;
	case V_Triple:
	case V_Quad:
		{
			// TODO: This can be optimized.
			VEOR(vd, vd, vd);
			ARMReg dest = (_VD & 2) ? D_1(vd) : D_0(vd);
			VMOV_immf(dest, 1.0f);
			if ((_VD & 1) == 0) {
				// Load with 1.0, 0.0
				VMOV_imm(I_64, D0, VIMMbits2bytes, 0x0F);
				VAND(dest, dest, D0);
			} else {
				VMOV_imm(I_64, D0, VIMMbits2bytes, 0xF0);
				VAND(dest, dest, D0);
			}
		}
		break;
	default:
		_dbg_assert_msg_(false,"Bad vidt instruction");
		break;
	}

	NEONApplyPrefixD(vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vcmp(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_COMP);
	if (js.HasUnknownPrefix())
		DISABLE;

	// Not a chance that this works on the first try :P
	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	VCondition cond = (VCondition)(op & 0xF);

	MappedRegs regs = NEONMapInIn(op, sz, sz);

	ARMReg vs = regs.vs, vt = regs.vt;
	ARMReg res = fpr.QAllocTemp(sz);

	// Some, we just fall back to the interpreter.
	// ES is just really equivalent to (value & 0x7F800000) == 0x7F800000.
	switch (cond) {
	case VC_EI: // c = my_isinf(s[i]); break;
	case VC_NI: // c = !my_isinf(s[i]); break;
		DISABLE;
	case VC_ES: // c = my_isnan(s[i]) || my_isinf(s[i]); break; // Tekken Dark Resurrection
	case VC_NS: // c = !my_isnan(s[i]) && !my_isinf(s[i]); break;
	case VC_EN: // c = my_isnan(s[i]); break;
	case VC_NN: // c = !my_isnan(s[i]); break;
		// if (_VS != _VT)
		DISABLE;
		break;

	case VC_EZ:
	case VC_NZ:
		VMOV_immf(Q0, 0.0f);
		break;
	default:
		;
	}

	int affected_bits = (1 << 4) | (1 << 5); // 4 and 5
	for (int i = 0; i < n; i++) {
		affected_bits |= 1 << i;
	}

	// Preload the pointer to our magic mask
	static const u32 collectorBits[4] = { 1, 2, 4, 8 };
	MOVP2R(R1, &collectorBits);

	// Do the compare
	MOVI2R(R0, 0);
	CCFlags flag = CC_AL;

	bool oneIsFalse = false;
	switch (cond) {
	case VC_FL: // c = 0;
		break;

	case VC_TR: // c = 1
		MOVI2R(R0, affected_bits);
		break;

	case VC_ES: // c = my_isnan(s[i]) || my_isinf(s[i]); break; // Tekken Dark Resurrection
	case VC_NS: // c = !(my_isnan(s[i]) || my_isinf(s[i])); break;
		DISABLE; // TODO: these shouldn't be that hard
		break;

	case VC_EN: // c = my_isnan(s[i]); break; // Tekken 6
	case VC_NN: // c = !my_isnan(s[i]); break;
		DISABLE; // TODO: these shouldn't be that hard
		break;

	case VC_EQ: // c = s[i] == t[i]
		VCEQ(F_32, res, vs, vt);
		break;

	case VC_LT: // c = s[i] < t[i]
		VCLT(F_32, res, vs, vt);
		break;

	case VC_LE: // c = s[i] <= t[i];
		VCLE(F_32, res, vs, vt);
		break;

	case VC_NE: // c = s[i] != t[i]
		VCEQ(F_32, res, vs, vt);
		oneIsFalse = true;
		break;

	case VC_GE: // c = s[i] >= t[i]
		VCGE(F_32, res, vs, vt);
		break;

	case VC_GT: // c = s[i] > t[i]
		VCGT(F_32, res, vs, vt);
		break;

	case VC_EZ: // c = s[i] == 0.0f || s[i] == -0.0f
		VCEQ(F_32, res, vs);
		break;

	case VC_NZ: // c = s[i] != 0
		VCEQ(F_32, res, vs);
		oneIsFalse = true;
		break;

	default:
		DISABLE;
	}
	if (oneIsFalse) {
		VMVN(res, res);
	}
	// Somehow collect the bits into a mask.

	// Collect the bits. Where's my PMOVMSKB? :(
	VLD1(I_32, Q0, R1, n < 2 ? 1 : 2);
	VAND(Q0, Q0, res);
	VPADD(I_32, Q0, Q0, Q0);
	VPADD(I_32, D0, D0, D0);
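	// (Each lane of res is all-ones where its compare passed; the VAND leaves that lane's
	// collector bit (1, 2, 4 or 8), and since those bits are disjoint the two pairwise adds
	// act as ORs, folding everything down into lane 0.)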
	// OK, bits now in S0.
	VMOV(R0, S0);
	// Zap irrelevant bits (V_Single, V_Triple)
	AND(R0, R0, affected_bits);

	// TODO: Now, how in the world do we generate the component OR and AND bits without burning tens of ALU instructions?? Lookup-table?
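	// One untested idea: derive both straight from R0 before merging into VFPUCC, e.g.
	//   AND(R1, R0, affected_bits & 0xF); CMP(R1, affected_bits & 0xF);  // all lanes true?
	//   SetCC(CC_EQ); ORR(R0, R0, 1 << 5); SetCC(CC_AL);
	//   CMP(R0, 0);                                                      // any lane true?
	//   SetCC(CC_NEQ); ORR(R0, R0, 1 << 4); SetCC(CC_AL);
	// (just a sketch - register and flag usage would need checking against the code above).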

	gpr.MapReg(MIPS_REG_VFPUCC, MAP_DIRTY);
	BIC(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), affected_bits);
	ORR(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), R0);
}

void ArmJit::CompNEON_Vcmov(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_COMP);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	ARMReg vs = NEONMapPrefixS(_VS, sz, 0);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_DIRTY);
	int tf = (op >> 19) & 1;
	int imm3 = (op >> 16) & 7;

	if (imm3 < 6) {
		// Test one bit of CC. This bit decides whether none or all subregisters are copied.
		gpr.MapReg(MIPS_REG_VFPUCC);
		TST(gpr.R(MIPS_REG_VFPUCC), 1 << imm3);
		FixupBranch skip = B_CC(CC_NEQ);
		VMOV_neon(vd, vs);
		SetJumpTarget(skip);
	} else {
		// Look at the bottom four bits of CC to individually decide if the subregisters should be copied.
		// This is the nasty one! Need to expand those bits into a full NEON register somehow.
		DISABLE;
		/*
		gpr.MapReg(MIPS_REG_VFPUCC);
		for (int i = 0; i < n; i++) {
			TST(gpr.R(MIPS_REG_VFPUCC), 1 << i);
			SetCC(tf ? CC_EQ : CC_NEQ);
			VMOV(fpr.V(dregs[i]), fpr.V(sregs[i]));
			SetCC(CC_AL);
		}
		*/
	}

	NEONApplyPrefixD(vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Viim(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DestARMReg vt = NEONMapPrefixD(_VT, V_Single, MAP_NOINIT | MAP_DIRTY);

	s32 imm = SignExtend16ToS32(op);
	// TODO: Optimize for low registers.
	MOVI2F(S0, (float)imm, R0);
	VMOV_neon(vt.rd, D0);

	NEONApplyPrefixD(vt);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vfim(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DestARMReg vt = NEONMapPrefixD(_VT, V_Single, MAP_NOINIT | MAP_DIRTY);

	FP16 half;
	half.u = op & 0xFFFF;
	FP32 fval = half_to_float_fast5(half);
	// TODO: Optimize for low registers.
	MOVI2F(S0, (float)fval.f, R0);
	VMOV_neon(vt.rd, D0);

	NEONApplyPrefixD(vt);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

// https://code.google.com/p/bullet/source/browse/branches/PhysicsEffects/include/vecmath/neon/vectormath_neon_assembly_implementations.S?r=2488
void ArmJit::CompNEON_VCrossQuat(MIPSOpcode op) {
	// This op does not support prefixes anyway.
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	if (sz != V_Triple) {
		// Quaternion product. Bleh.
		DISABLE;
	}

	MappedRegs r = NEONMapDirtyInIn(op, sz, sz, sz, false);

	ARMReg t1 = Q0;
	ARMReg t2 = fpr.QAllocTemp(V_Triple);

	// There has to be a faster way to do this. This is not really any better than
	// scalar.

	// d18, d19 (q9) = t1 = r.vt
	// d16, d17 (q8) = t2 = r.vs
	// d20, d21 (q10) = t
	VMOV_neon(t1, r.vs);
	VMOV_neon(t2, r.vt);
	VTRN(F_32, D_0(t2), D_1(t2)); // vtrn.32 d18,d19 @ q9 = <x2,z2,y2,w2> = d18,d19
	VREV64(F_32, D_0(t1), D_0(t1)); // vrev64.32 d16,d16 @ q8 = <y1,x1,z1,w1> = d16,d17
	VREV64(F_32, D_0(t2), D_0(t2)); // vrev64.32 d18,d18 @ q9 = <z2,x2,y2,w2> = d18,d19
	VTRN(F_32, D_0(t1), D_1(t1)); // vtrn.32 d16,d17 @ q8 = <y1,z1,x1,w1> = d16,d17
	// perform first half of cross product using rearranged inputs
	VMUL(F_32, r.vd, t1, t2); // vmul.f32 q10, q8, q9 @ q10 = <y1*z2,z1*x2,x1*y2,w1*w2>
	// @ rearrange inputs again
	VTRN(F_32, D_0(t2), D_1(t2)); // vtrn.32 d18,d19 @ q9 = <z2,y2,x2,w2> = d18,d19
	VREV64(F_32, D_0(t1), D_0(t1)); // vrev64.32 d16,d16 @ q8 = <z1,y1,x1,w1> = d16,d17
	VREV64(F_32, D_0(t2), D_0(t2)); // vrev64.32 d18,d18 @ q9 = <y2,z2,x2,w2> = d18,d19
	VTRN(F_32, D_0(t1), D_1(t1)); // vtrn.32 d16,d17 @ q8 = <z1,x1,y1,w1> = d16,d17
	// @ perform last half of cross product using rearranged inputs
	VMLS(F_32, r.vd, t1, t2); // vmls.f32 q10, q8, q9 @ q10 = <y1*z2-y2*z1,z1*x2-z2*x1,x1*y2-x2*y1,w1*w2-w2*w1>

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vsgn(MIPSOpcode op) {
	DISABLE;

	// This will be a bunch of bit magic.
}

void ArmJit::CompNEON_Vocp(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	// TODO: Handle T prefix. Right now it uses 1.0f always.

	// This is a hack that modifies prefixes. We eat them later, so just overwrite.
	// S prefix forces the negate flags.
	js.prefixS |= 0x000F0000;
	// T prefix forces constants on and regnum to 1.
	// That means negate still works, and abs activates a different constant.
	js.prefixT = (js.prefixT & ~0x000000FF) | 0x00000055 | 0x0000F000;
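	// (Decoding those masks, assuming the usual VFPU prefix layout: bits 0-7 are the 2-bit
	// source select per lane, so 0x55 picks index 1 everywhere; bits 12-15 (0xF000) are the
	// per-lane "use constant" flags; bits 16-19 (0x000F0000) are the per-lane negate flags.
	// Constant 1 without abs is 1.0f, which is presumably how the add below ends up
	// computing 1.0 - s for vocp.)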

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	MappedRegs regs = NEONMapDirtyIn(op, sz, sz);
	MOVI2F_neon(Q0, 1.0f, R0);
	VADD(F_32, regs.vd, Q0, regs.vs);
	NEONApplyPrefixD(regs.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_ColorConv(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vbfy(MIPSOpcode op) {
	DISABLE;
}

}
// namespace MIPSComp

#endif // PPSSPP_ARCH(ARM)