GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/ARM/ArmCompVFPU.cpp
// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
#if PPSSPP_ARCH(ARM)

#include <cmath>
#include "Common/CPUDetect.h"
#include "Common/Data/Convert/SmallDataConvert.h"
#include "Common/Math/math_util.h"

#include "Core/Compatibility.h"
#include "Core/Config.h"
#include "Core/MemMap.h"
#include "Core/Reporting.h"
#include "Core/System.h"
#include "Core/MIPS/MIPS.h"
#include "Core/MIPS/MIPSTables.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/MIPSCodeUtils.h"

#include "Core/MIPS/ARM/ArmJit.h"
#include "Core/MIPS/ARM/ArmRegCache.h"

// Cool NEON references:
// http://www.delmarnorth.com/microwave/requirements/neon-test-tutorial.pdf

// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non-working ones should have DISABLE.

// #define CONDITIONAL_DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
#define DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }

#define NEON_IF_AVAILABLE(func) { if (jo.useNEONVFPU) { func(op); return; } }
#define _RS MIPS_GET_RS(op)
#define _RT MIPS_GET_RT(op)
#define _RD MIPS_GET_RD(op)
#define _FS MIPS_GET_FS(op)
#define _FT MIPS_GET_FT(op)
#define _FD MIPS_GET_FD(op)
#define _SA MIPS_GET_SA(op)
#define _POS ((op>> 6) & 0x1F)
#define _SIZE ((op>>11) & 0x1F)
#define _IMM16 (signed short)(op & 0xFFFF)
#define _IMM26 (op & 0x03FFFFFF)

namespace MIPSComp
{
using namespace ArmGen;
using namespace ArmJitConstants;

// Vector regs can overlap in all sorts of swizzled ways.
// This does allow a single overlap in sregs[i].
static bool IsOverlapSafeAllowS(int dreg, int di, int sn, u8 sregs[], int tn = 0, u8 tregs[] = NULL)
{
	for (int i = 0; i < sn; ++i)
	{
		if (sregs[i] == dreg && i != di)
			return false;
	}
	for (int i = 0; i < tn; ++i)
	{
		if (tregs[i] == dreg)
			return false;
	}

	// Hurray, no overlap, we can write directly.
	return true;
}

static bool IsOverlapSafe(int dreg, int di, int sn, u8 sregs[], int tn = 0, u8 tregs[] = NULL)
{
	return IsOverlapSafeAllowS(dreg, di, sn, sregs, tn, tregs) && sregs[di] != dreg;
}
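
// Editor's note (illustrative, not in the original source): these overlap checks
// gate whether a result can be written straight to its destination registers.
// For example, Comp_VecDo3 below asks IsOverlapSafe(dregs[i], i, n, sregs, n, tregs)
// for each output lane; any lane whose destination aliases a source is staged in a
// temporary from fpr.GetTempV() and copied back after all lanes are computed.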

void ArmJit::Comp_VPFX(MIPSOpcode op)
{
	CONDITIONAL_DISABLE(VFPU_XFER);
	int data = op & 0xFFFFF;
	int regnum = (op >> 24) & 3;
	switch (regnum) {
	case 0: // S
		js.prefixS = data;
		js.prefixSFlag = JitState::PREFIX_KNOWN_DIRTY;
		break;
	case 1: // T
		js.prefixT = data;
		js.prefixTFlag = JitState::PREFIX_KNOWN_DIRTY;
		break;
	case 2: // D
		js.prefixD = data & 0x00000FFF;
		js.prefixDFlag = JitState::PREFIX_KNOWN_DIRTY;
		break;
	default:
		ERROR_LOG(Log::CPU, "VPFX - bad regnum %i : data=%08x", regnum, data);
		break;
	}
}

void ArmJit::ApplyPrefixST(u8 *vregs, u32 prefix, VectorSize sz) {
	if (prefix == 0xE4)
		return;

	int n = GetNumVectorElements(sz);
	u8 origV[4];
	static const float constantArray[8] = {0.f, 1.f, 2.f, 0.5f, 3.f, 1.f/3.f, 0.25f, 1.f/6.f};

	for (int i = 0; i < n; i++)
		origV[i] = vregs[i];

	for (int i = 0; i < n; i++) {
		int regnum = (prefix >> (i*2)) & 3;
		int abs = (prefix >> (8+i)) & 1;
		int negate = (prefix >> (16+i)) & 1;
		int constants = (prefix >> (12+i)) & 1;

		// Unchanged, hurray.
		if (!constants && regnum == i && !abs && !negate)
			continue;

		// This puts the value into a temp reg, so we won't write the modified value back.
		vregs[i] = fpr.GetTempV();
		if (!constants) {
			fpr.MapDirtyInV(vregs[i], origV[regnum]);
			fpr.SpillLockV(vregs[i]);

			// Prefix may say "z, z, z, z" but if this is a pair, we force to x.
			// TODO: But some ops seem to use const 0 instead?
			if (regnum >= n) {
				WARN_LOG(Log::CPU, "JIT: Invalid VFPU swizzle: %08x : %d / %d at PC = %08x (%s)", prefix, regnum, n, GetCompilerPC(), MIPSDisasmAt(GetCompilerPC()).c_str());
				regnum = 0;
			}

			if (abs) {
				VABS(fpr.V(vregs[i]), fpr.V(origV[regnum]));
				if (negate)
					VNEG(fpr.V(vregs[i]), fpr.V(vregs[i]));
			} else {
				if (negate)
					VNEG(fpr.V(vregs[i]), fpr.V(origV[regnum]));
				else
					VMOV(fpr.V(vregs[i]), fpr.V(origV[regnum]));
			}
		} else {
			fpr.MapRegV(vregs[i], MAP_DIRTY | MAP_NOINIT);
			fpr.SpillLockV(vregs[i]);
			MOVI2F(fpr.V(vregs[i]), constantArray[regnum + (abs<<2)], SCRATCHREG1, negate != 0);
		}
	}
}
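
// Editor's note (layout inferred from the shifts above; not a comment in the
// original source). A 20-bit S/T prefix word packs, per lane i:
//   bits 0-7   : swizzle - two bits per lane selecting the source lane (regnum)
//   bits 8-11  : abs flag for lane i
//   bits 12-15 : constant-select flag for lane i (regnum then indexes
//                constantArray, with abs supplying bit 2 of the index)
//   bits 16-19 : negate flag for lane i
// 0xE4 == 0b11100100 encodes the identity swizzle (lanes 3,2,1,0, no flags),
// which is why ApplyPrefixST returns early on it.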

void ArmJit::GetVectorRegsPrefixD(u8 *regs, VectorSize sz, int vectorReg) {
	_assert_(js.prefixDFlag & JitState::PREFIX_KNOWN);

	GetVectorRegs(regs, sz, vectorReg);
	if (js.prefixD == 0)
		return;

	int n = GetNumVectorElements(sz);
	for (int i = 0; i < n; i++) {
		// Hopefully this is rare, we'll just write it into a reg we drop.
		if (js.VfpuWriteMask(i))
			regs[i] = fpr.GetTempV();
	}
}

void ArmJit::ApplyPrefixD(const u8 *vregs, VectorSize sz) {
	_assert_(js.prefixDFlag & JitState::PREFIX_KNOWN);
	if (!js.prefixD)
		return;

	int n = GetNumVectorElements(sz);
	for (int i = 0; i < n; i++) {
		if (js.VfpuWriteMask(i))
			continue;

		int sat = (js.prefixD >> (i * 2)) & 3;
		if (sat == 1) {
			// Saturate to [0, 1]: d = x < 0 ? 0 : (x > 1 ? 1 : x)
			fpr.MapRegV(vregs[i], MAP_DIRTY);

			MOVI2F(S0, 0.0f, SCRATCHREG1);
			MOVI2F(S1, 1.0f, SCRATCHREG1);
			VCMP(fpr.V(vregs[i]), S0);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			SetCC(CC_LS);
			VMOV(fpr.V(vregs[i]), S0);
			SetCC(CC_AL);
			VCMP(fpr.V(vregs[i]), S1);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			SetCC(CC_GT);
			VMOV(fpr.V(vregs[i]), S1);
			SetCC(CC_AL);
		} else if (sat == 3) {
			// Saturate to [-1, 1]: d = x < -1 ? -1 : (x > 1 ? 1 : x)
			fpr.MapRegV(vregs[i], MAP_DIRTY);

			MOVI2F(S0, -1.0f, SCRATCHREG1);
			MOVI2F(S1, 1.0f, SCRATCHREG1);
			VCMP(fpr.V(vregs[i]), S0);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			SetCC(CC_LO);
			VMOV(fpr.V(vregs[i]), S0);
			SetCC(CC_AL);
			VCMP(fpr.V(vregs[i]), S1);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			SetCC(CC_GT);
			VMOV(fpr.V(vregs[i]), S1);
			SetCC(CC_AL);
		}
	}
}
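
// Editor's note: a minimal scalar sketch of the D-prefix saturation emitted
// above (illustrative only, hypothetical helper name; NaN passes through
// unchanged in both versions):
//
//   float ApplySatD(int sat, float x) {
//       if (sat == 1) return x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x);   // [0, 1]
//       if (sat == 3) return x < -1.0f ? -1.0f : (x > 1.0f ? 1.0f : x); // [-1, 1]
//       return x;
//   }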

void ArmJit::Comp_SV(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_SV);
	CONDITIONAL_DISABLE(LSU_VFPU);
	CheckMemoryBreakpoint();

	s32 offset = (signed short)(op & 0xFFFC);
	int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
	MIPSGPReg rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 50: //lv.s // VI(vt) = Memory::Read_U32(addr);
		{
			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
				gpr.MapRegAsPointer(rs);
				fpr.MapRegV(vt, MAP_NOINIT | MAP_DIRTY);
				VLDR(fpr.V(vt), gpr.RPtr(rs), offset);
				break;
			}

			// CC might be set by slow path below, so load regs first.
			fpr.MapRegV(vt, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, SCRATCHREG2);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}
#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VLDR(fpr.V(vt), R0, 0);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			VLDR(fpr.V(vt), R0, 0);
			if (doCheck) {
				SetCC(CC_EQ);
				MOVI2F(fpr.V(vt), 0.0f, SCRATCHREG1);
				SetCC(CC_AL);
			}
#endif
		}
		break;

	case 58: //sv.s // Memory::Write_U32(VI(vt), addr);
		{
			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
				gpr.MapRegAsPointer(rs);
				fpr.MapRegV(vt, 0);
				VSTR(fpr.V(vt), gpr.RPtr(rs), offset);
				break;
			}

			// CC might be set by slow path below, so load regs first.
			fpr.MapRegV(vt);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, SCRATCHREG2);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}
#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VSTR(fpr.V(vt), R0, 0);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			VSTR(fpr.V(vt), R0, 0);
			if (doCheck) {
				SetCC(CC_AL);
			}
#endif
		}
		break;


	default:
		DISABLE;
	}
}
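
// Editor's note (illustrative, not in the original source): both the imm and
// register paths above build the same host address - the guest address is
// masked into the PSP's mirrored 30-bit space and offset by the emulator's
// base pointer, roughly  host = Memory::base + ((rs + offset) & 0x3FFFFFFF).
// The non-fast-memory path additionally validates the address via
// SetCCAndR0ForSafeAddress and uses the doCheck blocks as its fallback.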

void ArmJit::Comp_SVQ(MIPSOpcode op)
{
	NEON_IF_AVAILABLE(CompNEON_SVQ);
	CONDITIONAL_DISABLE(LSU_VFPU);
	CheckMemoryBreakpoint();

	int imm = (signed short)(op&0xFFFC);
	int vt = (((op >> 16) & 0x1f)) | ((op&1) << 5);
	MIPSGPReg rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 54: //lv.q
		{
			// CC might be set by slow path below, so load regs first.
			u8 vregs[4];
			GetVectorRegs(vregs, V_Quad, vt);
			fpr.MapRegsAndSpillLockV(vregs, V_Quad, MAP_DIRTY | MAP_NOINIT);

			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, SCRATCHREG2);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}

#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}

			bool consecutive = true;
			for (int i = 0; i < 3 && consecutive; i++)
				if ((fpr.V(vregs[i]) + 1) != fpr.V(vregs[i+1]))
					consecutive = false;
			if (consecutive) {
				VLDMIA(R0, false, fpr.V(vregs[0]), 4);
			} else {
				for (int i = 0; i < 4; i++)
					VLDR(fpr.V(vregs[i]), R0, i * 4);
			}

			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			bool consecutive = true;
			for (int i = 0; i < 3 && consecutive; i++)
				if ((fpr.V(vregs[i]) + 1) != fpr.V(vregs[i+1]))
					consecutive = false;
			if (consecutive) {
				VLDMIA(R0, false, fpr.V(vregs[0]), 4);
			} else {
				for (int i = 0; i < 4; i++)
					VLDR(fpr.V(vregs[i]), R0, i * 4);
			}

			if (doCheck) {
				SetCC(CC_EQ);
				MOVI2R(SCRATCHREG1, 0);
				for (int i = 0; i < 4; i++)
					VMOV(fpr.V(vregs[i]), SCRATCHREG1);
				SetCC(CC_AL);
			}
#endif
		}
		break;

	case 62: //sv.q
		{
			// CC might be set by slow path below, so load regs first.
			u8 vregs[4];
			GetVectorRegs(vregs, V_Quad, vt);
			fpr.MapRegsAndSpillLockV(vregs, V_Quad, 0);

			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, SCRATCHREG2);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}

#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}

			bool consecutive = true;
			for (int i = 0; i < 3 && consecutive; i++)
				if ((fpr.V(vregs[i]) + 1) != fpr.V(vregs[i+1]))
					consecutive = false;
			if (consecutive) {
				VSTMIA(R0, false, fpr.V(vregs[0]), 4);
			} else {
				for (int i = 0; i < 4; i++)
					VSTR(fpr.V(vregs[i]), R0, i * 4);
			}

			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			bool consecutive = true;
			for (int i = 0; i < 3 && consecutive; i++)
				if ((fpr.V(vregs[i]) + 1) != fpr.V(vregs[i+1]))
					consecutive = false;
			if (consecutive) {
				VSTMIA(R0, false, fpr.V(vregs[0]), 4);
			} else {
				for (int i = 0; i < 4; i++)
					VSTR(fpr.V(vregs[i]), R0, i * 4);
			}

			if (doCheck) {
				SetCC(CC_AL);
			}
#endif
		}
		break;

	default:
		DISABLE;
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}
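
// Editor's note (illustrative): the "consecutive" test above exists because
// VLDMIA/VSTMIA can only transfer a run of adjacent host VFP registers. When
// the four mapped registers happen to be contiguous the quad moves as one
// load/store-multiple; otherwise it degrades to four VLDR/VSTR at offsets
// 0, 4, 8 and 12.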

void ArmJit::Comp_VVectorInit(MIPSOpcode op)
{
	NEON_IF_AVAILABLE(CompNEON_VVectorInit);
	CONDITIONAL_DISABLE(VFPU_XFER);
	// WARNING: No prefix support!
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	switch ((op >> 16) & 0xF)
	{
	case 6: // v=zeros; break; //vzero
		MOVI2F(S0, 0.0f, SCRATCHREG1);
		break;
	case 7: // v=ones; break; //vone
		MOVI2F(S0, 1.0f, SCRATCHREG1);
		break;
	default:
		DISABLE;
		break;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 dregs[4];
	GetVectorRegsPrefixD(dregs, sz, _VD);
	fpr.MapRegsAndSpillLockV(dregs, sz, MAP_NOINIT | MAP_DIRTY);

	for (int i = 0; i < n; ++i)
		VMOV(fpr.V(dregs[i]), S0);

	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_VIdt(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VIdt);

	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int vd = _VD;
	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);
	MOVI2F(S0, 0.0f, SCRATCHREG1);
	MOVI2F(S1, 1.0f, SCRATCHREG1);
	u8 dregs[4];
	GetVectorRegsPrefixD(dregs, sz, _VD);
	fpr.MapRegsAndSpillLockV(dregs, sz, MAP_NOINIT | MAP_DIRTY);
	switch (sz)
	{
	case V_Pair:
		VMOV(fpr.V(dregs[0]), (vd&1)==0 ? S1 : S0);
		VMOV(fpr.V(dregs[1]), (vd&1)==1 ? S1 : S0);
		break;
	case V_Quad:
		VMOV(fpr.V(dregs[0]), (vd&3)==0 ? S1 : S0);
		VMOV(fpr.V(dregs[1]), (vd&3)==1 ? S1 : S0);
		VMOV(fpr.V(dregs[2]), (vd&3)==2 ? S1 : S0);
		VMOV(fpr.V(dregs[3]), (vd&3)==3 ? S1 : S0);
		break;
	default:
		_dbg_assert_msg_(false, "Trying to interpret instruction that can't be interpreted");
		break;
	}

	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_VMatrixInit(MIPSOpcode op)
{
	NEON_IF_AVAILABLE(CompNEON_VMatrixInit);
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		// Don't think matrix init ops care about prefixes.
		// DISABLE;
	}

	MatrixSize sz = GetMtxSize(op);
	int n = GetMatrixSide(sz);

	u8 dregs[16];
	GetMatrixRegs(dregs, sz, _VD);

	switch ((op >> 16) & 0xF) {
	case 3: // vmidt
		MOVI2F(S0, 0.0f, SCRATCHREG1);
		MOVI2F(S1, 1.0f, SCRATCHREG1);
		for (int a = 0; a < n; a++) {
			for (int b = 0; b < n; b++) {
				fpr.MapRegV(dregs[a * 4 + b], MAP_DIRTY | MAP_NOINIT);
				VMOV(fpr.V(dregs[a * 4 + b]), a == b ? S1 : S0);
			}
		}
		break;
	case 6: // vmzero
		MOVI2F(S0, 0.0f, SCRATCHREG1);
		for (int a = 0; a < n; a++) {
			for (int b = 0; b < n; b++) {
				fpr.MapRegV(dregs[a * 4 + b], MAP_DIRTY | MAP_NOINIT);
				VMOV(fpr.V(dregs[a * 4 + b]), S0);
			}
		}
		break;
	case 7: // vmone
		MOVI2F(S1, 1.0f, SCRATCHREG1);
		for (int a = 0; a < n; a++) {
			for (int b = 0; b < n; b++) {
				fpr.MapRegV(dregs[a * 4 + b], MAP_DIRTY | MAP_NOINIT);
				VMOV(fpr.V(dregs[a * 4 + b]), S1);
			}
		}
		break;
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_VHdp(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VHdp);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int vd = _VD;
	int vs = _VS;
	int vt = _VT;
	VectorSize sz = GetVecSize(op);

	// TODO: Force read one of them into regs? probably not.
	u8 sregs[4], tregs[4], dregs[1];
	GetVectorRegsPrefixS(sregs, sz, vs);
	GetVectorRegsPrefixT(tregs, sz, vt);
	GetVectorRegsPrefixD(dregs, V_Single, vd);

	// TODO: applyprefixST here somehow (shuffle, etc...)
	fpr.MapRegsAndSpillLockV(sregs, sz, 0);
	fpr.MapRegsAndSpillLockV(tregs, sz, 0);
	VMUL(S0, fpr.V(sregs[0]), fpr.V(tregs[0]));

	int n = GetNumVectorElements(sz);
	for (int i = 1; i < n; i++) {
		// sum += s[i]*t[i];
		if (i == n - 1) {
			VADD(S0, S0, fpr.V(tregs[i]));
		} else {
			VMLA(S0, fpr.V(sregs[i]), fpr.V(tregs[i]));
		}
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();

	fpr.MapRegV(dregs[0], MAP_NOINIT | MAP_DIRTY);

	VMOV(fpr.V(dregs[0]), S0);
	ApplyPrefixD(dregs, V_Single);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}
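
// Editor's note (illustrative): vhdp is the homogeneous dot product. For n
// lanes the loop above computes
//   d = s[0]*t[0] + ... + s[n-2]*t[n-2] + t[n-1]
// treating the last lane of s as an implicit 1.0, which is why the final
// iteration uses VADD with t[n-1] instead of another VMLA.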

alignas(16) static const float vavg_table[4] = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };

void ArmJit::Comp_Vhoriz(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vhoriz);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int vd = _VD;
	int vs = _VS;
	int vt = _VT;
	VectorSize sz = GetVecSize(op);

	// TODO: Force read one of them into regs? probably not.
	u8 sregs[4], dregs[1];
	GetVectorRegsPrefixS(sregs, sz, vs);
	GetVectorRegsPrefixD(dregs, V_Single, vd);

	// TODO: applyprefixST here somehow (shuffle, etc...)
	fpr.MapRegsAndSpillLockV(sregs, sz, 0);

	int n = GetNumVectorElements(sz);

	bool is_vavg = ((op >> 16) & 0x1f) == 7;
	if (is_vavg) {
		MOVI2F(S1, vavg_table[n - 1], R0);
	}
	// Have to start at +0.000 for the correct sign.
	MOVI2F(S0, 0.0f, SCRATCHREG1);
	for (int i = 0; i < n; i++) {
		// sum += s[i];
		VADD(S0, S0, fpr.V(sregs[i]));
	}

	fpr.MapRegV(dregs[0], MAP_NOINIT | MAP_DIRTY);
	if (is_vavg) {
		VMUL(fpr.V(dregs[0]), S0, S1);
	} else {
		VMOV(fpr.V(dregs[0]), S0);
	}
	ApplyPrefixD(dregs, V_Single);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}
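
// Editor's note (illustrative): Comp_Vhoriz covers vfad (sum across lanes) and
// vavg (average). vavg reuses the same sum and multiplies by the precomputed
// reciprocal vavg_table[n - 1] == 1/n, trading a divide for a multiply.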

void ArmJit::Comp_VDot(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VDot);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int vd = _VD;
	int vs = _VS;
	int vt = _VT;
	VectorSize sz = GetVecSize(op);

	// TODO: Force read one of them into regs? probably not.
	u8 sregs[4], tregs[4], dregs[1];
	GetVectorRegsPrefixS(sregs, sz, vs);
	GetVectorRegsPrefixT(tregs, sz, vt);
	GetVectorRegsPrefixD(dregs, V_Single, vd);

	// TODO: applyprefixST here somehow (shuffle, etc...)
	fpr.MapRegsAndSpillLockV(sregs, sz, 0);
	fpr.MapRegsAndSpillLockV(tregs, sz, 0);
	VMUL(S0, fpr.V(sregs[0]), fpr.V(tregs[0]));

	int n = GetNumVectorElements(sz);
	for (int i = 1; i < n; i++) {
		// sum += s[i]*t[i];
		VMLA(S0, fpr.V(sregs[i]), fpr.V(tregs[i]));
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();

	fpr.MapRegV(dregs[0], MAP_NOINIT | MAP_DIRTY);

	VMOV(fpr.V(dregs[0]), S0);
	ApplyPrefixD(dregs, V_Single);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}


void ArmJit::Comp_VecDo3(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VecDo3);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int vd = _VD;
	int vs = _VS;
	int vt = _VT;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 sregs[4], tregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixT(tregs, sz, _VT);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	MIPSReg tempregs[4];
	for (int i = 0; i < n; i++) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs, n, tregs)) {
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	// Map first, then work. This will allow us to use VLDMIA more often
	// (when we add the appropriate map function) and the instruction ordering
	// will improve.
	// Note that mapping like this (instead of first all sregs, first all tregs etc)
	// reduces the amount of continuous registers a lot :(
	for (int i = 0; i < n; i++) {
		fpr.MapDirtyInInV(tempregs[i], sregs[i], tregs[i]);
		fpr.SpillLockV(tempregs[i]);
		fpr.SpillLockV(sregs[i]);
		fpr.SpillLockV(tregs[i]);
	}

	for (int i = 0; i < n; i++) {
		switch (op >> 26) {
		case 24: //VFPU0
			switch ((op >> 23)&7) {
			case 0: // d[i] = s[i] + t[i]; break; //vadd
				VADD(fpr.V(tempregs[i]), fpr.V(sregs[i]), fpr.V(tregs[i]));
				break;
			case 1: // d[i] = s[i] - t[i]; break; //vsub
				VSUB(fpr.V(tempregs[i]), fpr.V(sregs[i]), fpr.V(tregs[i]));
				break;
			case 7: // d[i] = s[i] / t[i]; break; //vdiv
				VDIV(fpr.V(tempregs[i]), fpr.V(sregs[i]), fpr.V(tregs[i]));
				break;
			default:
				DISABLE;
			}
			break;
		case 25: //VFPU1
			switch ((op >> 23) & 7) {
			case 0: // d[i] = s[i] * t[i]; break; //vmul
				VMUL(fpr.V(tempregs[i]), fpr.V(sregs[i]), fpr.V(tregs[i]));
				break;
			default:
				DISABLE;
			}
			break;
		// Unfortunately there is no VMIN/VMAX on ARM without NEON.
		case 27: //VFPU3
			switch ((op >> 23) & 7) {
			case 2: // vmin
			{
				VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
				VMRS_APSR();
				FixupBranch skipNAN = B_CC(CC_VC);
				VMOV(SCRATCHREG1, fpr.V(sregs[i]));
				VMOV(SCRATCHREG2, fpr.V(tregs[i]));
				// If both are negative, we reverse the comparison. We want the highest mantissa then.
				// Also, between -NAN and -5.0, we want -NAN to be less.
				TST(SCRATCHREG1, SCRATCHREG2);
				FixupBranch cmpPositive = B_CC(CC_PL);
				CMP(SCRATCHREG2, SCRATCHREG1);
				FixupBranch skipPositive = B();
				SetJumpTarget(cmpPositive);
				CMP(SCRATCHREG1, SCRATCHREG2);
				SetJumpTarget(skipPositive);
				SetCC(CC_AL);
				SetJumpTarget(skipNAN);
				SetCC(CC_LT);
				VMOV(fpr.V(tempregs[i]), fpr.V(sregs[i]));
				SetCC(CC_GE);
				VMOV(fpr.V(tempregs[i]), fpr.V(tregs[i]));
				SetCC(CC_AL);
				break;
			}
			case 3: // vmax
			{
				VCMP(fpr.V(tregs[i]), fpr.V(sregs[i]));
				VMRS_APSR();
				FixupBranch skipNAN = B_CC(CC_VC);
				VMOV(SCRATCHREG1, fpr.V(sregs[i]));
				VMOV(SCRATCHREG2, fpr.V(tregs[i]));
				// If both are negative, we reverse the comparison. We want the lowest mantissa then.
				// Also, between -NAN and -5.0, we want -5.0 to be greater.
				TST(SCRATCHREG2, SCRATCHREG1);
				FixupBranch cmpPositive = B_CC(CC_PL);
				CMP(SCRATCHREG1, SCRATCHREG2);
				FixupBranch skipPositive = B();
				SetJumpTarget(cmpPositive);
				CMP(SCRATCHREG2, SCRATCHREG1);
				SetJumpTarget(skipPositive);
				SetCC(CC_AL);
				SetJumpTarget(skipNAN);
				SetCC(CC_LT);
				VMOV(fpr.V(tempregs[i]), fpr.V(sregs[i]));
				SetCC(CC_GE);
				VMOV(fpr.V(tempregs[i]), fpr.V(tregs[i]));
				SetCC(CC_AL);
				break;
			}
			case 6: // vsge
				DISABLE; // pending testing
				VCMP(fpr.V(tregs[i]), fpr.V(sregs[i]));
				VMRS_APSR();
				// Unordered is always 0.
				SetCC(CC_GE);
				MOVI2F(fpr.V(tempregs[i]), 1.0f, SCRATCHREG1);
				SetCC(CC_LT);
				MOVI2F(fpr.V(tempregs[i]), 0.0f, SCRATCHREG1);
				SetCC(CC_AL);
				break;
			case 7: // vslt
				DISABLE; // pending testing
				VCMP(fpr.V(tregs[i]), fpr.V(sregs[i]));
				VMRS_APSR();
				// Unordered is always 0.
				SetCC(CC_LO);
				MOVI2F(fpr.V(tempregs[i]), 1.0f, SCRATCHREG1);
				SetCC(CC_HS);
				MOVI2F(fpr.V(tempregs[i]), 0.0f, SCRATCHREG1);
				SetCC(CC_AL);
				break;
			}
			break;

		default:
			DISABLE;
		}
	}

	for (int i = 0; i < n; i++) {
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}
	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}
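
// Editor's note (illustrative): since scalar VFP has no min/max, vmin/vmax
// above fall back to comparing the raw IEEE-754 bit patterns in GPRs when
// VCMP reports an unordered (NaN) result - the CC_VC branch skips this integer
// path for ordered inputs. Same-sign floats order like integers; when both
// sign bits are set (TST comes out negative) the operands are compared in the
// opposite order, since more-negative floats have larger bit patterns. This is
// what makes -NaN compare below -5.0 as the comments intend.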

void ArmJit::Comp_VV2Op(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VV2Op);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	// Pre-processing: Eliminate silly no-op VMOVs, common in Wipeout Pure
	if (((op >> 16) & 0x1f) == 0 && _VS == _VD && js.HasNoPrefix()) {
		return;
	}

	// Catch the disabled operations immediately so we don't map registers unnecessarily later.
	// Move these down to the big switch below as they are implemented.
	switch ((op >> 16) & 0x1f) {
	case 18: // d[i] = sinf((float)M_PI_2 * s[i]); break; //vsin
		DISABLE;
		break;
	case 19: // d[i] = cosf((float)M_PI_2 * s[i]); break; //vcos
		DISABLE;
		break;
	case 20: // d[i] = powf(2.0f, s[i]); break; //vexp2
		DISABLE;
		break;
	case 21: // d[i] = logf(s[i])/log(2.0f); break; //vlog2
		DISABLE;
		break;
	case 26: // d[i] = -sinf((float)M_PI_2 * s[i]); break; // vnsin
		DISABLE;
		break;
	case 28: // d[i] = 1.0f / expf(s[i] * (float)M_LOG2E); break; // vrexp2
		DISABLE;
		break;
	default:
		;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	MIPSReg tempregs[4];
	for (int i = 0; i < n; ++i) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs)) {
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	// Get some extra temps, used by vasin only.
	ARMReg t2 = INVALID_REG, t3 = INVALID_REG, t4 = INVALID_REG;
	if (((op >> 16) & 0x1f) == 23) {
		// Only get here on vasin.
		int t[3] = { fpr.GetTempV(), fpr.GetTempV(), fpr.GetTempV() };
		fpr.MapRegV(t[0], MAP_NOINIT);
		fpr.MapRegV(t[1], MAP_NOINIT);
		fpr.MapRegV(t[2], MAP_NOINIT);
		t2 = fpr.V(t[0]);
		t3 = fpr.V(t[1]);
		t4 = fpr.V(t[2]);
	}

	// Pre map the registers to get better instruction ordering.
	// Note that mapping like this (instead of first all sregs, first all tempregs etc)
	// reduces the amount of continuous registers a lot :(
	for (int i = 0; i < n; i++) {
		fpr.MapDirtyInV(tempregs[i], sregs[i]);
		fpr.SpillLockV(tempregs[i]);
		fpr.SpillLockV(sregs[i]);
	}

	// Warning: sregs[i] and tempregs[i] may be the same reg.
	// Helps for vmov, hurts for vrcp, etc.
	for (int i = 0; i < n; i++) {
		switch ((op >> 16) & 0x1f) {
		case 0: // d[i] = s[i]; break; //vmov
			// Probably for swizzle.
			VMOV(fpr.V(tempregs[i]), fpr.V(sregs[i]));
			break;
		case 1: // d[i] = fabsf(s[i]); break; //vabs
			VABS(fpr.V(tempregs[i]), fpr.V(sregs[i]));
			break;
		case 2: // d[i] = -s[i]; break; //vneg
			VNEG(fpr.V(tempregs[i]), fpr.V(sregs[i]));
			break;
		case 4: // if (s[i] < 0) d[i] = 0; else {if(s[i] > 1.0f) d[i] = 1.0f; else d[i] = s[i];} break; // vsat0
			if (i == 0) {
				MOVI2F(S0, 0.0f, SCRATCHREG1);
				MOVI2F(S1, 1.0f, SCRATCHREG1);
			}
			VCMP(fpr.V(sregs[i]), S0);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			VMOV(fpr.V(tempregs[i]), fpr.V(sregs[i]));
			SetCC(CC_LS);
			VMOV(fpr.V(tempregs[i]), S0);
			SetCC(CC_AL);
			VCMP(fpr.V(sregs[i]), S1);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			SetCC(CC_GT);
			VMOV(fpr.V(tempregs[i]), S1);
			SetCC(CC_AL);
			break;
		case 5: // if (s[i] < -1.0f) d[i] = -1.0f; else {if(s[i] > 1.0f) d[i] = 1.0f; else d[i] = s[i];} break; // vsat1
			if (i == 0) {
				MOVI2F(S0, -1.0f, SCRATCHREG1);
				MOVI2F(S1, 1.0f, SCRATCHREG1);
			}
			VCMP(fpr.V(sregs[i]), S0);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			VMOV(fpr.V(tempregs[i]), fpr.V(sregs[i]));
			SetCC(CC_LO);
			VMOV(fpr.V(tempregs[i]), S0);
			SetCC(CC_AL);
			VCMP(fpr.V(sregs[i]), S1);
			VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
			SetCC(CC_GT);
			VMOV(fpr.V(tempregs[i]), S1);
			SetCC(CC_AL);
			break;
		case 16: // d[i] = 1.0f / s[i]; break; //vrcp
			if (i == 0) {
				MOVI2F(S0, 1.0f, SCRATCHREG1);
			}
			VDIV(fpr.V(tempregs[i]), S0, fpr.V(sregs[i]));
			break;
		case 17: // d[i] = 1.0f / sqrtf(s[i]); break; //vrsq
			if (i == 0) {
				MOVI2F(S0, 1.0f, SCRATCHREG1);
			}
			VSQRT(S1, fpr.V(sregs[i]));
			VDIV(fpr.V(tempregs[i]), S0, S1);
			break;
		case 22: // d[i] = sqrtf(s[i]); break; //vsqrt
			VSQRT(fpr.V(tempregs[i]), fpr.V(sregs[i]));
			VABS(fpr.V(tempregs[i]), fpr.V(tempregs[i]));
			break;
		case 23: // d[i] = asinf(s[i] * (float)M_2_PI); break; //vasin
			// Seems to work well enough but can disable if it becomes a problem.
			// Should be easy enough to translate to NEON. There we can load all the constants
			// in one go of course.
			VCMP(fpr.V(sregs[i])); // flags = sign(sregs[i])
			VMRS_APSR();
			MOVI2F(S0, 1.0f, SCRATCHREG1);
			VABS(t4, fpr.V(sregs[i])); // t4 = |sregs[i]|
			VSUB(t3, S0, t4);
			VSQRT(t3, t3); // t3 = sqrt(1 - |sregs[i]|)
			MOVI2F(S1, -0.0187293f, SCRATCHREG1);
			MOVI2F(t2, 0.0742610f, SCRATCHREG1);
			VMLA(t2, t4, S1);
			MOVI2F(S1, -0.2121144f, SCRATCHREG1);
			VMLA(S1, t4, t2);
			MOVI2F(t2, 1.5707288f, SCRATCHREG1);
			VMLA(t2, t4, S1);
			MOVI2F(fpr.V(tempregs[i]), M_PI / 2, SCRATCHREG1);
			VMLS(fpr.V(tempregs[i]), t2, t3); // tr[i] = M_PI / 2 - t2 * t3
			{
				FixupBranch br = B_CC(CC_GE);
				VNEG(fpr.V(tempregs[i]), fpr.V(tempregs[i]));
				SetJumpTarget(br);
			}
			// Correction factor for PSP range. Could be baked into the calculation above?
			MOVI2F(S1, 1.0f / (M_PI / 2), SCRATCHREG1);
			VMUL(fpr.V(tempregs[i]), fpr.V(tempregs[i]), S1);
			break;
		case 24: // d[i] = -1.0f / s[i]; break; // vnrcp
			if (i == 0) {
				MOVI2F(S0, -1.0f, SCRATCHREG1);
			}
			VDIV(fpr.V(tempregs[i]), S0, fpr.V(sregs[i]));
			break;
		default:
			ERROR_LOG(Log::JIT, "case missing in vfpu vv2op");
			DISABLE;
			break;
		}
	}

	for (int i = 0; i < n; ++i) {
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}

	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}
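
// Editor's note (illustrative): the vasin constants above (1.5707288,
// -0.2121144, 0.0742610, -0.0187293) match the classic Abramowitz & Stegun
// 4.4.45 approximation
//   asin(x) ~= pi/2 - sqrt(1 - x) * (a0 + a1*x + a2*x^2 + a3*x^3)
// evaluated on |x| via Horner's rule (the VMLA chain), with the sign restored
// by the CC_GE branch and the result scaled by 2/pi because the VFPU returns
// asin normalized to [-1, 1] rather than radians.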

void ArmJit::Comp_Vi2f(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vi2f);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	int imm = (op >> 16) & 0x1f;
	const float mult = 1.0f / (float)(1UL << imm);

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	MIPSReg tempregs[4];
	for (int i = 0; i < n; ++i) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs)) {
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	if (mult != 1.0f)
		MOVI2F(S0, mult, SCRATCHREG1);

	for (int i = 0; i < n; i++) {
		fpr.MapDirtyInV(tempregs[i], sregs[i]);
		VCVT(fpr.V(tempregs[i]), fpr.V(sregs[i]), TO_FLOAT | IS_SIGNED);
		if (mult != 1.0f)
			VMUL(fpr.V(tempregs[i]), fpr.V(tempregs[i]), S0);
	}

	for (int i = 0; i < n; ++i) {
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}

	ApplyPrefixD(dregs, sz);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}
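
// Editor's note (illustrative): vi2f interprets the integer input as fixed
// point with imm fractional bits, i.e. d[i] = (float)s[i] * 2^-imm. The scale
// 1.0f / (1 << imm) is an exact power of two, so the extra VMUL adds no
// rounding beyond the int-to-float conversion itself.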

void ArmJit::Comp_Vh2f(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vh2f);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	// This multi-VCVT.F32.F16 is only available in the VFPv4 extension.
	// The VFPv3 equivalents are VCVTB/VCVTT, which we don't yet support.
	if (!(cpu_info.bHalf && cpu_info.bVFPv4)) {
		// No hardware support for half-to-float, fallback to interpreter
		// TODO: Translate the fast SSE solution to standard integer/VFP stuff
		// for the weaker CPUs.
		DISABLE;
	}

	u8 sregs[4], dregs[4];
	VectorSize sz = GetVecSize(op);
	VectorSize outSz;

	switch (sz) {
	case V_Single:
		outSz = V_Pair;
		break;
	case V_Pair:
		outSz = V_Quad;
		break;
	default:
		DISABLE;
	}

	int n = GetNumVectorElements(sz);
	int nOut = n * 2;
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, outSz, _VD);

	static const ARMReg tmp[4] = { S0, S1, S2, S3 };

	for (int i = 0; i < n; i++) {
		fpr.MapRegV(sregs[i], sz);
		VMOV(tmp[i], fpr.V(sregs[i]));
	}

	// This always converts four 16-bit floats in D0 to four 32-bit floats
	// in Q0. If we are dealing with a pair here, we just ignore the upper two outputs.
	// There are also a couple of other instructions that do it one at a time but they
	// don't seem worth the trouble.
	VCVTF32F16(Q0, D0);

	for (int i = 0; i < nOut; i++) {
		fpr.MapRegV(dregs[i], MAP_DIRTY | MAP_NOINIT);
		VMOV(fpr.V(dregs[i]), tmp[i]);
	}

	ApplyPrefixD(dregs, sz);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vf2i(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vf2i);
	CONDITIONAL_DISABLE(VFPU_VEC);

	if (js.HasUnknownPrefix()) {
		DISABLE;
	}
	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	int imm = (op >> 16) & 0x1f;
	float mult = (float)(1ULL << imm);

	switch ((op >> 21) & 0x1f)
	{
	case 17:
		break; //z - truncate. Easy to support.
	case 16:
	case 18:
	case 19:
		DISABLE;
		break;
	}

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	MIPSReg tempregs[4];
	for (int i = 0; i < n; ++i) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs)) {
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	if (mult != 1.0f)
		MOVI2F(S1, mult, SCRATCHREG1);

	for (int i = 0; i < n; i++) {
		fpr.MapDirtyInV(tempregs[i], sregs[i]);
		switch ((op >> 21) & 0x1f) {
		case 16: /* TODO */ break; //n
		case 17:
			if (mult != 1.0f) {
				VMUL(S0, fpr.V(sregs[i]), S1);
				VCVT(fpr.V(tempregs[i]), S0, TO_INT | ROUND_TO_ZERO);
			} else {
				VCVT(fpr.V(tempregs[i]), fpr.V(sregs[i]), TO_INT | ROUND_TO_ZERO);
			}
			break;
		case 18: /* TODO */ break; //u
		case 19: /* TODO */ break; //d
		}
	}

	for (int i = 0; i < n; ++i) {
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}

	ApplyPrefixD(dregs, sz);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Mftv(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Mftv);
	CONDITIONAL_DISABLE(VFPU_XFER);

	int imm = op & 0xFF;
	MIPSGPReg rt = _RT;
	switch ((op >> 21) & 0x1f) {
	case 3: //mfv / mfvc
		// rt = 0, imm = 255 appears to be used as a CPU interlock by some games.
		if (rt != 0) {
			if (imm < 128) { //R(rt) = VI(imm);
				fpr.MapRegV(imm, 0);
				gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
				VMOV(gpr.R(rt), fpr.V(imm));
			} else if (imm < 128 + VFPU_CTRL_MAX) { //mfvc
				if (imm - 128 == VFPU_CTRL_CC) {
					if (gpr.IsImm(MIPS_REG_VFPUCC)) {
						gpr.SetImm(rt, gpr.GetImm(MIPS_REG_VFPUCC));
					} else {
						gpr.MapDirtyIn(rt, MIPS_REG_VFPUCC);
						MOV(gpr.R(rt), gpr.R(MIPS_REG_VFPUCC));
					}
				} else {
					// In case we have a saved prefix.
					FlushPrefixV();
					gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
					LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
				}
			} else {
				//ERROR - maybe need to make this value too an "interlock" value?
				ERROR_LOG(Log::CPU, "mfv - invalid register %i", imm);
			}
		}
		break;

	case 7: // mtv
		if (imm < 128) {
			gpr.MapReg(rt);
			fpr.MapRegV(imm, MAP_DIRTY | MAP_NOINIT);
			VMOV(fpr.V(imm), gpr.R(rt));
		} else if (imm < 128 + VFPU_CTRL_MAX) { //mtvc //currentMIPS->vfpuCtrl[imm - 128] = R(rt);
			if (imm - 128 == VFPU_CTRL_CC) {
				if (gpr.IsImm(rt)) {
					gpr.SetImm(MIPS_REG_VFPUCC, gpr.GetImm(rt));
				} else {
					gpr.MapDirtyIn(MIPS_REG_VFPUCC, rt);
					MOV(gpr.R(MIPS_REG_VFPUCC), gpr.R(rt));
				}
			} else {
				gpr.MapReg(rt);
				STR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
			}

			// TODO: Optimization if rt is Imm?
			// Set these BEFORE disable!
			if (imm - 128 == VFPU_CTRL_SPREFIX) {
				js.prefixSFlag = JitState::PREFIX_UNKNOWN;
				js.blockWrotePrefixes = true;
			} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
				js.prefixTFlag = JitState::PREFIX_UNKNOWN;
				js.blockWrotePrefixes = true;
			} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
				js.prefixDFlag = JitState::PREFIX_UNKNOWN;
				js.blockWrotePrefixes = true;
			}
		} else {
			//ERROR
			_dbg_assert_msg_(false, "mtv - invalid register");
		}
		break;

	default:
		DISABLE;
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vmfvc(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vmtvc);
	CONDITIONAL_DISABLE(VFPU_XFER);

	int vd = _VD;
	int imm = (op >> 8) & 0x7F;
	if (imm < VFPU_CTRL_MAX) {
		fpr.MapRegV(vd);
		if (imm == VFPU_CTRL_CC) {
			gpr.MapReg(MIPS_REG_VFPUCC, 0);
			VMOV(fpr.V(vd), gpr.R(MIPS_REG_VFPUCC));
		} else {
			ADDI2R(SCRATCHREG1, CTXREG, offsetof(MIPSState, vfpuCtrl[0]) + imm * 4, SCRATCHREG2);
			VLDR(fpr.V(vd), SCRATCHREG1, 0);
		}
		fpr.ReleaseSpillLocksAndDiscardTemps();
	} else {
		fpr.MapRegV(vd);
		MOVI2F(fpr.V(vd), 0.0f, SCRATCHREG1);
	}
}

void ArmJit::Comp_Vmtvc(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vmtvc);
	CONDITIONAL_DISABLE(VFPU_XFER);

	int vs = _VS;
	int imm = op & 0x7F;
	if (imm < VFPU_CTRL_MAX) {
		fpr.MapRegV(vs);
		if (imm == VFPU_CTRL_CC) {
			gpr.MapReg(MIPS_REG_VFPUCC, MAP_DIRTY | MAP_NOINIT);
			VMOV(gpr.R(MIPS_REG_VFPUCC), fpr.V(vs));
		} else {
			ADDI2R(SCRATCHREG1, CTXREG, offsetof(MIPSState, vfpuCtrl[0]) + imm * 4, SCRATCHREG2);
			VSTR(fpr.V(vs), SCRATCHREG1, 0);
		}
		fpr.ReleaseSpillLocksAndDiscardTemps();

		if (imm == VFPU_CTRL_SPREFIX) {
			js.prefixSFlag = JitState::PREFIX_UNKNOWN;
			js.blockWrotePrefixes = true;
		} else if (imm == VFPU_CTRL_TPREFIX) {
			js.prefixTFlag = JitState::PREFIX_UNKNOWN;
			js.blockWrotePrefixes = true;
		} else if (imm == VFPU_CTRL_DPREFIX) {
			js.prefixDFlag = JitState::PREFIX_UNKNOWN;
			js.blockWrotePrefixes = true;
		}
	}
}

void ArmJit::Comp_Vmmov(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vmmov);
	CONDITIONAL_DISABLE(VFPU_MTX_VMMOV);

	// This probably ignores prefixes for all sane intents and purposes.
	if (_VS == _VD) {
		// A lot of these no-op matrix moves in Wipeout... Just drop the instruction entirely.
		return;
	}

	MatrixSize sz = GetMtxSize(op);
	int n = GetMatrixSide(sz);

	u8 sregs[16], dregs[16];
	GetMatrixRegs(sregs, sz, _VS);
	GetMatrixRegs(dregs, sz, _VD);

	// Rough overlap check.
	bool overlap = false;
	if (GetMtx(_VS) == GetMtx(_VD)) {
		// Potential overlap (guaranteed for 3x3 or more).
		overlap = true;
	}

	if (overlap) {
		// Not so common, fallback.
		DISABLE;
	} else {
		for (int a = 0; a < n; a++) {
			for (int b = 0; b < n; b++) {
				fpr.MapDirtyInV(dregs[a * 4 + b], sregs[a * 4 + b]);
				VMOV(fpr.V(dregs[a * 4 + b]), fpr.V(sregs[a * 4 + b]));
			}
		}
		fpr.ReleaseSpillLocksAndDiscardTemps();
	}
}

void ArmJit::Comp_VScl(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VScl);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 sregs[4], dregs[4], treg;
	GetVectorRegsPrefixS(sregs, sz, _VS);
	// TODO: Prefixes seem strange...
	GetVectorRegsPrefixT(&treg, V_Single, _VT);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	// Move to S0 early, so we don't have to worry about overlap with scale.
	fpr.LoadToRegV(S0, treg);

	// For prefixes to work, we just have to ensure that none of the output registers spill
	// and that there's no overlap.
	MIPSReg tempregs[4];
	for (int i = 0; i < n; ++i) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs)) {
			// Need to use temp regs
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	// The meat of the function!
	for (int i = 0; i < n; i++) {
		fpr.MapDirtyInV(tempregs[i], sregs[i]);
		VMUL(fpr.V(tempregs[i]), fpr.V(sregs[i]), S0);
	}

	for (int i = 0; i < n; i++) {
		// All must be mapped for prefixes to work.
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}

	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vmmul(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMMUL);
	if (!js.HasNoPrefix()) {
		DISABLE;
	}
	NEON_IF_AVAILABLE(CompNEON_Vmmul);

	if (PSP_CoreParameter().compat.flags().MoreAccurateVMMUL) {
		// Fall back to interpreter, which has the accurate implementation.
		// Later we might do something more optimized here.
		DISABLE;
	}

	MatrixSize sz = GetMtxSize(op);
	int n = GetMatrixSide(sz);

	u8 sregs[16], tregs[16], dregs[16];
	GetMatrixRegs(sregs, sz, _VS);
	GetMatrixRegs(tregs, sz, _VT);
	GetMatrixRegs(dregs, sz, _VD);

	// Rough overlap check.
	bool overlap = false;
	if (GetMtx(_VS) == GetMtx(_VD) || GetMtx(_VT) == GetMtx(_VD)) {
		// Potential overlap (guaranteed for 3x3 or more).
		overlap = true;
	}

	if (overlap) {
		DISABLE;
	} else {
		for (int a = 0; a < n; a++) {
			for (int b = 0; b < n; b++) {
				fpr.MapInInV(sregs[b * 4], tregs[a * 4]);
				VMUL(S0, fpr.V(sregs[b * 4]), fpr.V(tregs[a * 4]));
				for (int c = 1; c < n; c++) {
					fpr.MapInInV(sregs[b * 4 + c], tregs[a * 4 + c]);
					VMLA(S0, fpr.V(sregs[b * 4 + c]), fpr.V(tregs[a * 4 + c]));
				}
				fpr.MapRegV(dregs[a * 4 + b], MAP_DIRTY | MAP_NOINIT);
				VMOV(fpr.V(dregs[a * 4 + b]), S0);
			}
		}
		fpr.ReleaseSpillLocksAndDiscardTemps();
	}
}

void ArmJit::Comp_Vmscl(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vmscl);
	DISABLE;
}

void ArmJit::Comp_Vtfm(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vtfm);
	CONDITIONAL_DISABLE(VFPU_MTX_VTFM);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	// TODO: This probably ignores prefixes? Or maybe uses D?

	VectorSize sz = GetVecSize(op);
	MatrixSize msz = GetMtxSize(op);
	int n = GetNumVectorElements(sz);
	int ins = (op >> 23) & 7;

	bool homogenous = false;
	if (n == ins) {
		n++;
		sz = (VectorSize)((int)(sz) + 1);
		msz = (MatrixSize)((int)(msz) + 1);
		homogenous = true;
	}
	// Otherwise, n should already be ins + 1.
	else if (n != ins + 1) {
		DISABLE;
	}

	u8 sregs[16], dregs[4], tregs[4];
	GetMatrixRegs(sregs, msz, _VS);
	GetVectorRegs(tregs, sz, _VT);
	GetVectorRegs(dregs, sz, _VD);

	// TODO: test overlap, optimize.
	int tempregs[4];
	for (int i = 0; i < n; i++) {
		fpr.MapInInV(sregs[i * 4], tregs[0]);
		VMUL(S0, fpr.V(sregs[i * 4]), fpr.V(tregs[0]));
		for (int k = 1; k < n; k++) {
			if (!homogenous || k != n - 1) {
				fpr.MapInInV(sregs[i * 4 + k], tregs[k]);
				VMLA(S0, fpr.V(sregs[i * 4 + k]), fpr.V(tregs[k]));
			} else {
				fpr.MapRegV(sregs[i * 4 + k]);
				VADD(S0, S0, fpr.V(sregs[i * 4 + k]));
			}
		}

		int temp = fpr.GetTempV();
		fpr.MapRegV(temp, MAP_NOINIT | MAP_DIRTY);
		fpr.SpillLockV(temp);
		VMOV(fpr.V(temp), S0);
		tempregs[i] = temp;
	}
	for (int i = 0; i < n; i++) {
		u8 temp = tempregs[i];
		fpr.MapRegV(dregs[i], MAP_NOINIT | MAP_DIRTY);
		VMOV(fpr.V(dregs[i]), fpr.V(temp));
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_VCrs(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VCrs);
	DISABLE;
}

void ArmJit::Comp_VDet(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VDet);
	DISABLE;
}

void ArmJit::Comp_Vi2x(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vi2x);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int bits = ((op >> 16) & 2) == 0 ? 8 : 16; // vi2uc/vi2c (0/1), vi2us/vi2s (2/3)
	bool unsignedOp = ((op >> 16) & 1) == 0; // vi2uc (0), vi2us (2)

	if (unsignedOp) {
		// Requires a tricky clamp operation that we can't do without more temps, see below
		DISABLE;
	}

	// These instructions pack pairs or quads of integers into 32 bits.
	// The unsigned (u) versions skip the sign bit when packing.
	VectorSize sz = GetVecSize(op);
	VectorSize outsize;
	if (bits == 8) {
		outsize = V_Single;
		if (sz != V_Quad) {
			DISABLE;
		}
	} else {
		switch (sz) {
		case V_Pair:
			outsize = V_Single;
			break;
		case V_Quad:
			outsize = V_Pair;
			break;
		default:
			DISABLE;
		}
	}

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, outsize, _VD);

	// First, let's assemble the sregs into lanes of either D0 (pair) or Q0 (quad).
	bool quad = sz == V_Quad;
	fpr.MapRegsAndSpillLockV(sregs, sz, 0);
	VMOV(S0, fpr.V(sregs[0]));
	VMOV(S1, fpr.V(sregs[1]));
	if (quad) {
		VMOV(S2, fpr.V(sregs[2]));
		VMOV(S3, fpr.V(sregs[3]));
	}

	// TODO: For "u" type ops, we clamp to zero and shift off the sign bit first.
	// Need some temp regs to do that efficiently, right?

	// At this point, we simply need to collect the high bits of each 32-bit lane into one register.
	if (bits == 8) {
		// Really want to do a VSHRN(..., 24) but that can't be encoded. So we synthesize it.
		VSHR(I_32, Q0, Q0, 16);
		VSHRN(I_32, D0, Q0, 8);
		VMOVN(I_16, D0, Q0);
	} else {
		VSHRN(I_32, D0, Q0, 16);
	}

	fpr.MapRegsAndSpillLockV(dregs, outsize, MAP_DIRTY|MAP_NOINIT);
	VMOV(fpr.V(dregs[0]), S0);
	if (outsize == V_Pair) {
		VMOV(fpr.V(dregs[1]), S1);
	}

	ApplyPrefixD(dregs, outsize);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}
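
// Editor's note (rough scalar equivalent of the packing above; illustrative):
//   bits == 16 (vi2s): out16[i] = (u32)s[i] >> 16;   // one VSHRN by 16
//   bits == 8  (vi2c): out8[i]  = (u32)s[i] >> 24;   // staged shifts
// The 8-bit case would like a single narrowing shift by 24, but as the comment
// in the code says that can't be encoded, hence the VSHR/VSHRN/VMOVN chain.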

void ArmJit::Comp_Vx2i(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vx2i);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int bits = ((op >> 16) & 2) == 0 ? 8 : 16; // vuc2i/vc2i (0/1), vus2i/vs2i (2/3)
	bool unsignedOp = ((op >> 16) & 1) == 0; // vuc2i (0), vus2i (2)

	if (bits == 8 && unsignedOp) {
		// vuc2i is odd and needs temp registers for implementation.
		DISABLE;
	}
	// vs2i or vus2i unpack pairs of 16-bit integers into 32-bit integers, with the values
	// at the top. vus2i shifts it an extra bit right afterward.
	// vc2i and vuc2i unpack quads of 8-bit integers into 32-bit integers, with the values
	// at the top too. vuc2i is a bit special (see below.)
	// Let's do this similarly as h2f - we do a solution that works for both singles and pairs
	// then use it for both.

	VectorSize sz = GetVecSize(op);
	VectorSize outsize;
	if (bits == 8) {
		outsize = V_Quad;
	} else {
		switch (sz) {
		case V_Single:
			outsize = V_Pair;
			break;
		case V_Pair:
			outsize = V_Quad;
			break;
		default:
			DISABLE;
		}
	}

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, outsize, _VD);

	fpr.MapRegsAndSpillLockV(sregs, sz, 0);
	if (sz == V_Single) {
		VMOV(S0, fpr.V(sregs[0]));
	} else if (sz == V_Pair) {
		VMOV(S0, fpr.V(sregs[0]));
		VMOV(S1, fpr.V(sregs[1]));
	} else if (bits == 8) {
		// For some reason, sz is quad on vc2i.
		VMOV(S0, fpr.V(sregs[0]));
	}


	if (bits == 16) {
		// Simply expand, to upper bits.
		VSHLL(I_16, Q0, D0, 16);
	} else if (bits == 8) {
		if (unsignedOp) {
			// vuc2i is a bit special. It spreads out the bits like this:
			// s[0] = 0xDDCCBBAA -> d[0] = (0xAAAAAAAA >> 1), d[1] = (0xBBBBBBBB >> 1), etc.
			// TODO
		} else {
			VSHLL(I_8, Q0, D0, 8);
			VSHLL(I_16, Q0, D0, 16);
		}
	}

	// At this point we have the regs in the 4 lanes.
	// In the "u" mode, we need to shift it out of the sign bit.
	if (unsignedOp) {
		ArmGen::ARMReg reg = (outsize == V_Quad) ? Q0 : D0;
		VSHR(I_32 | I_UNSIGNED, reg, reg, 1);
	}

	fpr.MapRegsAndSpillLockV(dregs, outsize, MAP_NOINIT);

	VMOV(fpr.V(dregs[0]), S0);
	VMOV(fpr.V(dregs[1]), S1);
	if (outsize == V_Quad) {
		VMOV(fpr.V(dregs[2]), S2);
		VMOV(fpr.V(dregs[3]), S3);
	}

	ApplyPrefixD(dregs, outsize);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_VCrossQuat(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VCrossQuat);
	// This op does not support prefixes anyway.
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix())
		DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 sregs[4], tregs[4], dregs[4];
	GetVectorRegs(sregs, sz, _VS);
	GetVectorRegs(tregs, sz, _VT);
	GetVectorRegs(dregs, sz, _VD);

	// Map everything into registers.
	fpr.MapRegsAndSpillLockV(sregs, sz, 0);
	fpr.MapRegsAndSpillLockV(tregs, sz, 0);

	if (sz == V_Triple) {
		MIPSReg temp3 = fpr.GetTempV();
		fpr.MapRegV(temp3, MAP_DIRTY | MAP_NOINIT);
		// Cross product vcrsp.t

		// Compute X
		VMUL(S0, fpr.V(sregs[1]), fpr.V(tregs[2]));
		VMLS(S0, fpr.V(sregs[2]), fpr.V(tregs[1]));

		// Compute Y
		VMUL(S1, fpr.V(sregs[2]), fpr.V(tregs[0]));
		VMLS(S1, fpr.V(sregs[0]), fpr.V(tregs[2]));

		// Compute Z
		VMUL(fpr.V(temp3), fpr.V(sregs[0]), fpr.V(tregs[1]));
		VMLS(fpr.V(temp3), fpr.V(sregs[1]), fpr.V(tregs[0]));

		fpr.MapRegsAndSpillLockV(dregs, sz, MAP_NOINIT);
		VMOV(fpr.V(dregs[0]), S0);
		VMOV(fpr.V(dregs[1]), S1);
		VMOV(fpr.V(dregs[2]), fpr.V(temp3));
	} else if (sz == V_Quad) {
		MIPSReg temp3 = fpr.GetTempV();
		MIPSReg temp4 = fpr.GetTempV();
		fpr.MapRegV(temp3, MAP_DIRTY | MAP_NOINIT);
		fpr.MapRegV(temp4, MAP_DIRTY | MAP_NOINIT);

		// Quaternion product vqmul.q untested
		// d[0] = s[0] * t[3] + s[1] * t[2] - s[2] * t[1] + s[3] * t[0];
		VMUL(S0, fpr.V(sregs[0]), fpr.V(tregs[3]));
		VMLA(S0, fpr.V(sregs[1]), fpr.V(tregs[2]));
		VMLS(S0, fpr.V(sregs[2]), fpr.V(tregs[1]));
		VMLA(S0, fpr.V(sregs[3]), fpr.V(tregs[0]));

		//d[1] = -s[0] * t[2] + s[1] * t[3] + s[2] * t[0] + s[3] * t[1];
		VNMUL(S1, fpr.V(sregs[0]), fpr.V(tregs[2]));
		VMLA(S1, fpr.V(sregs[1]), fpr.V(tregs[3]));
		VMLA(S1, fpr.V(sregs[2]), fpr.V(tregs[0]));
		VMLA(S1, fpr.V(sregs[3]), fpr.V(tregs[1]));

		//d[2] = s[0] * t[1] - s[1] * t[0] + s[2] * t[3] + s[3] * t[2];
		VMUL(fpr.V(temp3), fpr.V(sregs[0]), fpr.V(tregs[1]));
		VMLS(fpr.V(temp3), fpr.V(sregs[1]), fpr.V(tregs[0]));
		VMLA(fpr.V(temp3), fpr.V(sregs[2]), fpr.V(tregs[3]));
		VMLA(fpr.V(temp3), fpr.V(sregs[3]), fpr.V(tregs[2]));

		//d[3] = -s[0] * t[0] - s[1] * t[1] - s[2] * t[2] + s[3] * t[3];
		VNMUL(fpr.V(temp4), fpr.V(sregs[0]), fpr.V(tregs[0]));
		VMLS(fpr.V(temp4), fpr.V(sregs[1]), fpr.V(tregs[1]));
		VMLS(fpr.V(temp4), fpr.V(sregs[2]), fpr.V(tregs[2]));
		VMLA(fpr.V(temp4), fpr.V(sregs[3]), fpr.V(tregs[3]));

		fpr.MapRegsAndSpillLockV(dregs, sz, MAP_NOINIT);
		VMOV(fpr.V(dregs[0]), S0);
		VMOV(fpr.V(dregs[1]), S1);
		VMOV(fpr.V(dregs[2]), fpr.V(temp3));
		VMOV(fpr.V(dregs[3]), fpr.V(temp4));
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}
1839
1840
void ArmJit::Comp_Vcmp(MIPSOpcode op) {
1841
NEON_IF_AVAILABLE(CompNEON_Vcmp);
1842
CONDITIONAL_DISABLE(VFPU_COMP);
1843
if (js.HasUnknownPrefix())
1844
DISABLE;
1845
1846
VectorSize sz = GetVecSize(op);
1847
int n = GetNumVectorElements(sz);
1848
1849
VCondition cond = (VCondition)(op & 0xF);
1850
1851
u8 sregs[4], tregs[4];
1852
GetVectorRegsPrefixS(sregs, sz, _VS);
1853
GetVectorRegsPrefixT(tregs, sz, _VT);
1854
1855
// Some, we just fall back to the interpreter.
1856
// ES is just really equivalent to (value & 0x7F800000) == 0x7F800000.
1857
1858
switch (cond) {
1859
case VC_EI: // c = my_isinf(s[i]); break;
1860
case VC_NI: // c = !my_isinf(s[i]); break;
1861
DISABLE;
1862
case VC_ES: // c = my_isnan(s[i]) || my_isinf(s[i]); break; // Tekken Dark Resurrection
1863
case VC_NS: // c = !my_isnan(s[i]) && !my_isinf(s[i]); break;
1864
case VC_EN: // c = my_isnan(s[i]); break;
1865
case VC_NN: // c = !my_isnan(s[i]); break;
1866
if (_VS != _VT)
1867
DISABLE;
1868
break;
1869
1870
case VC_EZ:
1871
case VC_NZ:
1872
break;
1873
default:
1874
;
1875
}

	// First, let's get the trivial ones.
	int affected_bits = (1 << 4) | (1 << 5); // Bits 4 ("any") and 5 ("all") are always written.

	MOVI2R(SCRATCHREG1, 0);
	for (int i = 0; i < n; ++i) {
		// Let's only handle the easy ones, and fall back on the interpreter for the rest.
		CCFlags flag = CC_AL;
		switch (cond) {
		case VC_FL: // c = 0;
			break;

		case VC_TR: // c = 1
			if (i == 0) {
				if (n == 1) {
					MOVI2R(SCRATCHREG1, 0x31); // Bits 0, 4 and 5: for a single element, "any" and "all" agree.
				} else {
					MOVI2R(SCRATCHREG1, 1 << i);
				}
			} else {
				ORR(SCRATCHREG1, SCRATCHREG1, 1 << i);
			}
			break;

		case VC_ES: // c = my_isnan(s[i]) || my_isinf(s[i]); break; // Tekken Dark Resurrection
		case VC_NS: // c = !(my_isnan(s[i]) || my_isinf(s[i])); break;
			// For these, we use the integer ALU as there is no support on ARM for testing for INF.
			// Testing for nan or inf is the same as testing (bits & 0x7F800000) == 0x7F800000.
			// We need an extra temporary register, so we store away SCRATCHREG1.
			STR(SCRATCHREG1, CTXREG, offsetof(MIPSState, temp));
			fpr.MapRegV(sregs[i], 0);
			MOVI2R(SCRATCHREG1, 0x7F800000);
			VMOV(SCRATCHREG2, fpr.V(sregs[i]));
			AND(SCRATCHREG2, SCRATCHREG2, SCRATCHREG1);
			CMP(SCRATCHREG2, SCRATCHREG1); // (SCRATCHREG2 & 0x7F800000) == 0x7F800000
			flag = cond == VC_ES ? CC_EQ : CC_NEQ;
			LDR(SCRATCHREG1, CTXREG, offsetof(MIPSState, temp));
			break;

		case VC_EN: // c = my_isnan(s[i]); break; // Tekken 6
			// Should we involve T? Where I found this used, it compared a register with itself, so it should be fine.
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_VS; // overflow = unordered : http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204j/Chdhcfbc.html
			break;

		case VC_NN: // c = !my_isnan(s[i]); break;
			// Should we involve T? Where I found this used, it compared a register with itself, so it should be fine.
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_VC; // !overflow = !unordered : http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204j/Chdhcfbc.html
			break;

		case VC_EQ: // c = s[i] == t[i]
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_EQ;
			break;

		case VC_LT: // c = s[i] < t[i]
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_LO;
			break;

		case VC_LE: // c = s[i] <= t[i];
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_LS;
			break;

		case VC_NE: // c = s[i] != t[i]
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_NEQ;
			break;

		case VC_GE: // c = s[i] >= t[i]
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_GE;
			break;

		case VC_GT: // c = s[i] > t[i]
			fpr.MapInInV(sregs[i], tregs[i]);
			VCMP(fpr.V(sregs[i]), fpr.V(tregs[i]));
			VMRS_APSR();
			flag = CC_GT;
			break;

		case VC_EZ: // c = s[i] == 0.0f || s[i] == -0.0f
			fpr.MapRegV(sregs[i]);
			VCMP(fpr.V(sregs[i])); // vcmp(sregs[i], #0.0)
			VMRS_APSR();
			flag = CC_EQ;
			break;

		case VC_NZ: // c = s[i] != 0
			fpr.MapRegV(sregs[i]);
			VCMP(fpr.V(sregs[i])); // vcmp(sregs[i], #0.0)
			VMRS_APSR();
			flag = CC_NEQ;
			break;

		default:
			DISABLE;
		}
		if (flag != CC_AL) {
			SetCC(flag);
			if (i == 0) {
				if (n == 1) {
					MOVI2R(SCRATCHREG1, 0x31); // Bits 0, 4 and 5, as above.
				} else {
					MOVI2R(SCRATCHREG1, 1); // 1 << i, but i == 0
				}
			} else {
				ORR(SCRATCHREG1, SCRATCHREG1, 1 << i);
			}
			SetCC(CC_AL);
		}

		affected_bits |= 1 << i;
	}

	// Aggregate the lane bits into the "any" (bit 4) and "all" (bit 5) flags. Urgh, expensive.
	// Can optimize for the case of one comparison, which is the most common after all.
	if (n > 1) {
		CMP(SCRATCHREG1, affected_bits & 0xF); // All lane bits set -> "all".
		SetCC(CC_EQ);
		ORR(SCRATCHREG1, SCRATCHREG1, 1 << 5);
		SetCC(CC_AL);

		CMP(SCRATCHREG1, 0); // Any lane bit set -> "any".
		SetCC(CC_NEQ);
		ORR(SCRATCHREG1, SCRATCHREG1, 1 << 4);
		SetCC(CC_AL);
	}

	gpr.MapReg(MIPS_REG_VFPUCC, MAP_DIRTY);
	BIC(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), affected_bits);
	ORR(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), SCRATCHREG1);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vcmov(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vcmov);
	CONDITIONAL_DISABLE(VFPU_COMP);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, sz, _VD);
	int tf = (op >> 19) & 1; // 0 = vcmovt (copy when the CC bit is set), 1 = vcmovf.
	int imm3 = (op >> 16) & 7;

	for (int i = 0; i < n; ++i) {
		// Simplification: disable if overlap is unsafe.
		if (!IsOverlapSafeAllowS(dregs[i], i, n, sregs)) {
			DISABLE;
		}
	}
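
	// Semantics, as a sketch: for vcmovt (tf == 0), d[i] = cc ? s[i] : d[i], and
	// vcmovf inverts the test. imm3 < 6 selects a single shared CC bit; otherwise
	// the per-lane CC bits 0..3 are consulted individually, as below.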

	if (imm3 < 6) {
		// Test one bit of CC. This bit decides whether none or all subregisters are copied.
		fpr.MapRegsAndSpillLockV(dregs, sz, MAP_DIRTY);
		fpr.MapRegsAndSpillLockV(sregs, sz, 0);
		gpr.MapReg(MIPS_REG_VFPUCC);
		TST(gpr.R(MIPS_REG_VFPUCC), 1 << imm3);
		SetCC(tf ? CC_EQ : CC_NEQ);
		for (int i = 0; i < n; i++) {
			VMOV(fpr.V(dregs[i]), fpr.V(sregs[i]));
		}
		SetCC(CC_AL);
	} else {
		// Look at the bottom four bits of CC to individually decide if the subregisters should be copied.
		fpr.MapRegsAndSpillLockV(dregs, sz, MAP_DIRTY);
		fpr.MapRegsAndSpillLockV(sregs, sz, 0);
		gpr.MapReg(MIPS_REG_VFPUCC);
		for (int i = 0; i < n; i++) {
			TST(gpr.R(MIPS_REG_VFPUCC), 1 << i);
			SetCC(tf ? CC_EQ : CC_NEQ);
			VMOV(fpr.V(dregs[i]), fpr.V(sregs[i]));
			SetCC(CC_AL);
		}
	}

	ApplyPrefixD(dregs, sz);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Viim(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Viim);
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	u8 dreg;
	GetVectorRegs(&dreg, V_Single, _VT);

	s32 imm = SignExtend16ToS32(op);
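	// For example (a sketch): a raw immediate of 0xFFFF sign-extends to -1,
	// so the MOVI2F below materializes -1.0f in the destination register.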
	fpr.MapRegV(dreg, MAP_DIRTY | MAP_NOINIT);
	MOVI2F(fpr.V(dreg), (float)imm, SCRATCHREG1);

	ApplyPrefixD(&dreg, V_Single);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vfim(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vfim);
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	u8 dreg;
	GetVectorRegs(&dreg, V_Single, _VT);

	FP16 half;
	half.u = op & 0xFFFF;
	FP32 fval = half_to_float_fast5(half);
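	// The immediate is an IEEE half-float; for example (a sketch), 0x3C00 decodes
	// to 1.0, which the MOVI2F below materializes as a full 32-bit float.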
	fpr.MapRegV(dreg, MAP_DIRTY | MAP_NOINIT);
	MOVI2F(fpr.V(dreg), fval.f, SCRATCHREG1);

	ApplyPrefixD(&dreg, V_Single);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vcst(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vcst);
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	int conNum = (op >> 16) & 0x1f;
	int vd = _VD;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 dregs[4];
	GetVectorRegsPrefixD(dregs, sz, _VD);
	fpr.MapRegsAndSpillLockV(dregs, sz, MAP_NOINIT | MAP_DIRTY);

	// Load the selected constant once into S0, then broadcast it to every destination lane.
	gpr.SetRegImm(SCRATCHREG1, (u32)(void *)&cst_constants[conNum]);
	VLDR(S0, SCRATCHREG1, 0);
	for (int i = 0; i < n; ++i)
		VMOV(fpr.V(dregs[i]), S0);

	ApplyPrefixD(dregs, sz);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

static double SinCos(float angle) {
	union { struct { float sin; float cos; }; double out; } sincos;
	vfpu_sincos(angle, sincos.sin, sincos.cos);
	return sincos.out;
}

static double SinCosNegSin(float angle) {
	union { struct { float sin; float cos; }; double out; } sincos;
	vfpu_sincos(angle, sincos.sin, sincos.cos);
	sincos.sin = -sincos.sin;
	return sincos.out;
}
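
// The union above is an ABI trick: packing both floats into one double means the
// pair comes back as a single return value (D0 on hardfp, R0/R1 on softfp), so a
// single call retrieves sin and cos together. The sin field occupies the low half,
// so after the call S0 holds sin and S1 holds cos. Type punning through a union is
// technically compiler-dependent in C++, but GCC and Clang allow it.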

void ArmJit::CompVrotShuffle(u8 *dregs, int imm, VectorSize sz, bool negSin) {
	int n = GetNumVectorElements(sz);
	char what[4] = {'0', '0', '0', '0'};
	// Bits 0-1 of imm select the lane that receives cos, bits 2-3 the lane that receives sin.
	// When both fields select the same lane, sin is broadcast to all lanes first,
	// and the shared lane then receives cos.
	if (((imm >> 2) & 3) == (imm & 3)) {
		for (int i = 0; i < 4; i++)
			what[i] = 'S';
	}
	what[(imm >> 2) & 3] = 'S';
	what[imm & 3] = 'C';
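
	// Worked example (a sketch): imm = 0x4 puts cos in lane 0 and sin in lane 1,
	// so for a quad what[] becomes "CS00" and the result is (cos, sin, 0, 0).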

	// S0 holds sin and S1 holds cos, as returned by SinCos()/SinCosNegSin().
	fpr.MapRegsAndSpillLockV(dregs, sz, MAP_DIRTY | MAP_NOINIT);
	for (int i = 0; i < n; i++) {
		switch (what[i]) {
		case 'C': VMOV(fpr.V(dregs[i]), S1); break;
		case 'S': if (negSin) VNEG(fpr.V(dregs[i]), S0); else VMOV(fpr.V(dregs[i]), S0); break;
		case '0':
			{
				MOVI2F(fpr.V(dregs[i]), 0.0f, SCRATCHREG1);
				break;
			}
		default:
			ERROR_LOG(Log::JIT, "Bad what in vrot");
			break;
		}
	}
}

// Very heavily used by FF:CC. Should be replaced by a fast approximation instead of
// calling the math library.
// Apparently this may not work on hardfp. I don't think we have any platforms using this though.
void ArmJit::Comp_VRot(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_VRot);
	// VRot probably doesn't accept prefixes anyway.
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

#if PPSSPP_ARCH(ARM_HARDFP)
	DISABLE;
#endif

	int vd = _VD;
	int vs = _VS;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 dregs[4];
	u8 dregs2[4];

	MIPSOpcode nextOp = GetOffsetInstruction(1);
	int vd2 = -1;
	int imm2 = -1;
	// Check whether the next instruction is another vrot reading the same source
	// register, so the expensive SinCos call can be shared between the two.
	if ((nextOp >> 26) == 60 && ((nextOp >> 21) & 0x1F) == 29 && _VS == MIPS_GET_VS(nextOp)) {
		// Pair of vrot. Let's join them.
		vd2 = MIPS_GET_VD(nextOp);
		imm2 = (nextOp >> 16) & 0x1f;
		// NOTICE_LOG(Log::JIT, "Joint VFPU at %08x", js.blockStart);
	}
	u8 sreg;
	GetVectorRegs(dregs, sz, vd);
	if (vd2 >= 0)
		GetVectorRegs(dregs2, sz, vd2);
	GetVectorRegs(&sreg, V_Single, vs);

	int imm = (op >> 16) & 0x1f;

	gpr.FlushBeforeCall();
	fpr.FlushAll();

	bool negSin1 = (imm & 0x10) ? true : false;

	fpr.MapRegV(sreg);
	// We should write a custom pure-asm function instead.
#if defined(__ARM_PCS_VFP) // Hardfp
	VMOV(S0, fpr.V(sreg));
#else // Softfp
	VMOV(R0, fpr.V(sreg));
#endif
	// FlushBeforeCall saves R1.
	QuickCallFunction(R1, negSin1 ? (void *)&SinCosNegSin : (void *)&SinCos);
#if !defined(__ARM_PCS_VFP)
	// On softfp, the double return value arrives in R0,R1 (the union joins the two
	// floats); repack it into D0 so S0/S1 hold sin/cos as on hardfp.
	VMOV(D0, R0, R1);
#endif
	CompVrotShuffle(dregs, imm, sz, false);
	if (vd2 != -1) {
		// If the negsin setting differs between the two joined invocations, we need to flip the second one.
		bool negSin2 = (imm2 & 0x10) ? true : false;
		CompVrotShuffle(dregs2, imm2, sz, negSin1 != negSin2);
		EatInstruction(nextOp);
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_Vsgn(MIPSOpcode op) {
	NEON_IF_AVAILABLE(CompNEON_Vsgn);
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	u8 sregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	MIPSReg tempregs[4];
	for (int i = 0; i < n; ++i) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs)) {
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	for (int i = 0; i < n; ++i) {
		fpr.MapDirtyInV(tempregs[i], sregs[i]);
		VCMP(fpr.V(sregs[i])); // vcmp(sregs[i], #0.0)
		VMOV(SCRATCHREG1, fpr.V(sregs[i]));
		VMRS_APSR(); // Move FP flags from FPSCR to APSR (regular flags).
		SetCC(CC_NEQ);
		// Non-zero: keep only the sign bit and OR in 1.0f (0x3F800000), giving +/-1.0f.
		AND(SCRATCHREG1, SCRATCHREG1, AssumeMakeOperand2(0x80000000));
		ORR(SCRATCHREG1, SCRATCHREG1, AssumeMakeOperand2(0x3F800000));
		SetCC(CC_EQ);
		// Zero: the result is +0.0f.
		MOV(SCRATCHREG1, AssumeMakeOperand2(0x0));
		SetCC(CC_AL);
		VMOV(fpr.V(tempregs[i]), SCRATCHREG1);
	}
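
	// In C terms, the conditional bit ops above implement (a sketch):
	//   u32 r = (x != 0.0f) ? ((bits(x) & 0x80000000) | 0x3F800000) : 0;
	// i.e. +1.0f or -1.0f carrying x's sign, and +0.0f when x compares equal to
	// zero (which also covers -0.0f).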

	for (int i = 0; i < n; ++i) {
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}

	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}
2302
void ArmJit::Comp_Vocp(MIPSOpcode op) {
2303
NEON_IF_AVAILABLE(CompNEON_Vocp);
2304
CONDITIONAL_DISABLE(VFPU_VEC);
2305
if (js.HasUnknownPrefix()) {
2306
DISABLE;
2307
}
2308
2309
VectorSize sz = GetVecSize(op);
2310
int n = GetNumVectorElements(sz);
2311
2312
// This is a hack that modifies prefixes. We eat them later, so just overwrite.
2313
// S prefix forces the negate flags.
2314
js.prefixS |= 0x000F0000;
2315
// T prefix forces constants on and regnum to 1.
2316
// That means negate still works, and abs activates a different constant.
2317
js.prefixT = (js.prefixT & ~0x000000FF) | 0x00000055 | 0x0000F000;
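
	// Net effect (a sketch): with S forced negative and T forced to the constant 1.0,
	// the VADD below computes d[i] = 1.0f - s[i], which is exactly what vocp does.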

	u8 sregs[4], tregs[4], dregs[4];
	GetVectorRegsPrefixS(sregs, sz, _VS);
	// T deliberately reads _VS too, presumably because the forced constant prefix replaces the values anyway.
	GetVectorRegsPrefixT(tregs, sz, _VS);
	GetVectorRegsPrefixD(dregs, sz, _VD);

	MIPSReg tempregs[4];
	for (int i = 0; i < n; ++i) {
		if (!IsOverlapSafe(dregs[i], i, n, sregs)) {
			tempregs[i] = fpr.GetTempV();
		} else {
			tempregs[i] = dregs[i];
		}
	}

	for (int i = 0; i < n; ++i) {
		fpr.MapDirtyInInV(tempregs[i], sregs[i], tregs[i]);
		VADD(fpr.V(tempregs[i]), fpr.V(tregs[i]), fpr.V(sregs[i]));
	}

	for (int i = 0; i < n; ++i) {
		if (dregs[i] != tempregs[i]) {
			fpr.MapDirtyInV(dregs[i], tempregs[i]);
			VMOV(fpr.V(dregs[i]), fpr.V(tempregs[i]));
		}
	}

	ApplyPrefixD(dregs, sz);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::Comp_ColorConv(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::Comp_Vbfy(MIPSOpcode op) {
	DISABLE;
}
}

#endif // PPSSPP_ARCH(ARM)