GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/ARM64/Arm64IRCompVec.cpp
// Copyright (c) 2023- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
// In other words, PPSSPP_ARCH(ARM64) || DISASM_ALL.
#if PPSSPP_ARCH(ARM64) || (PPSSPP_PLATFORM(WINDOWS) && !defined(__LIBRETRO__))

#include <algorithm>
#include "Common/CPUDetect.h"
#include "Core/MemMap.h"
#include "Core/MIPS/ARM64/Arm64IRJit.h"
#include "Core/MIPS/ARM64/Arm64IRRegCache.h"

// This file contains compilation for vector instructions.
//
// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non-working ones should have DISABLE. No flags because that's in IR already.

// #define CONDITIONAL_DISABLE { CompIR_Generic(inst); return; }
#define CONDITIONAL_DISABLE {}
#define DISABLE { CompIR_Generic(inst); return; }
#define INVALIDOP { _assert_msg_(false, "Invalid IR inst %d", (int)inst.op); CompIR_Generic(inst); return; }

namespace MIPSComp {

using namespace Arm64Gen;
using namespace Arm64IRJitConstants;

static bool Overlap(IRReg r1, int l1, IRReg r2, int l2) {
	return r1 < r2 + l2 && r1 + l1 > r2;
}
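// For example, Overlap(0, 4, 2, 1) is true (reg 2 lies inside the quad starting at 0),
// while Overlap(0, 4, 4, 1) is false - the half-open ranges merely touch.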

void Arm64JitBackend::CompIR_VecArith(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Vec4Add:
		regs_.Map(inst);
		fp_.FADD(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2));
		break;

	case IROp::Vec4Sub:
		regs_.Map(inst);
		fp_.FSUB(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2));
		break;

	case IROp::Vec4Mul:
		regs_.Map(inst);
		fp_.FMUL(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2));
		break;

	case IROp::Vec4Div:
		regs_.Map(inst);
		fp_.FDIV(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2));
		break;

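	// Note on Vec4Scale below: src2 is a single float, and FMUL-by-element multiplies src1 by
	// lane (src2 & 3) of the quad containing that scalar (src2 & ~3), so when the scalar
	// overlaps dest or src1 we map that whole quad rather than broadcasting it first.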
	case IROp::Vec4Scale:
		if (Overlap(inst.dest, 4, inst.src2, 1) || Overlap(inst.src1, 4, inst.src2, 1)) {
			// ARM64 can handle this, but we have to map specially.
			regs_.SpillLockFPR(inst.dest, inst.src1);
			regs_.MapVec4(inst.src1);
			regs_.MapVec4(inst.src2 & ~3);
			regs_.MapVec4(inst.dest, MIPSMap::NOINIT);
			fp_.FMUL(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2 & ~3), inst.src2 & 3);
		} else {
			regs_.Map(inst);
			fp_.FMUL(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2), 0);
		}
		break;

	case IROp::Vec4Neg:
		regs_.Map(inst);
		fp_.FNEG(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1));
		break;

	case IROp::Vec4Abs:
		regs_.Map(inst);
		fp_.FABS(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1));
		break;

	default:
		INVALIDOP;
		break;
	}
}

enum class Arm64Shuffle {
	DUP0_AAAA,
	DUP1_BBBB,
	DUP2_CCCC,
	DUP3_DDDD,
	MOV_ABCD,
	TRN1_AACC,
	TRN2_BBDD,
	UZP1_ACAC,
	UZP2_BDBD,
	ZIP1_AABB,
	ZIP2_CCDD,
	REV64_BADC,
	EXT4_BCDA,
	EXT8_CDAB,
	EXT12_DABC,

	// These steps are more expensive and use a temp.
	REV64_EXT8_CDBA,
	REV64_EXT8_DCAB,
	EXT4_UZP1_BDAC,
	EXT4_UZP2_CABD,
	EXT8_ZIP1_ACBD,
	EXT8_ZIP2_CADB,

	// Any that don't fully replace dest must be after this point.
	INS0_TO_1,
	INS0_TO_2,
	INS0_TO_3,
	INS1_TO_0,
	INS1_TO_2,
	INS1_TO_3,
	INS2_TO_0,
	INS2_TO_1,
	INS2_TO_3,
	INS3_TO_0,
	INS3_TO_1,
	INS3_TO_2,
	XTN2,

	// These are hacks to avoid 4-instruction sequences, which the scoring isn't smart enough to avoid on its own.
	EXT12_ZIP1_ADBA,
	DUP3_UZP1_DDAC,

	COUNT_NORMAL = EXT12_ZIP1_ADBA,
	COUNT_SIMPLE = REV64_EXT8_CDBA,
	COUNT_NOPREV = INS0_TO_1,
};
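// Naming note: A-D stand for input lanes 0-3, and the letters in each name read as output
// lanes 0-3 from left to right. So DUP0_AAAA broadcasts lane 0, EXT8_CDAB rotates the vector
// by two lanes, and the INSx_TO_y entries copy a single lane while leaving the rest of dest alone.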

uint8_t Arm64ShuffleMask(Arm64Shuffle method) {
	// Hopefully optimized into a lookup table, this is a bit less confusing to read...
	switch (method) {
	case Arm64Shuffle::DUP0_AAAA: return 0x00;
	case Arm64Shuffle::DUP1_BBBB: return 0x55;
	case Arm64Shuffle::DUP2_CCCC: return 0xAA;
	case Arm64Shuffle::DUP3_DDDD: return 0xFF;
	case Arm64Shuffle::MOV_ABCD: return 0xE4;
	case Arm64Shuffle::TRN1_AACC: return 0xA0;
	case Arm64Shuffle::TRN2_BBDD: return 0xF5;
	case Arm64Shuffle::UZP1_ACAC: return 0x88;
	case Arm64Shuffle::UZP2_BDBD: return 0xDD;
	case Arm64Shuffle::ZIP1_AABB: return 0x50;
	case Arm64Shuffle::ZIP2_CCDD: return 0xFA;
	case Arm64Shuffle::REV64_BADC: return 0xB1;
	case Arm64Shuffle::EXT4_BCDA: return 0x39;
	case Arm64Shuffle::EXT8_CDAB: return 0x4E;
	case Arm64Shuffle::EXT12_DABC: return 0x93;
	case Arm64Shuffle::REV64_EXT8_CDBA: return 0x1E;
	case Arm64Shuffle::REV64_EXT8_DCAB: return 0x4B;
	case Arm64Shuffle::EXT4_UZP1_BDAC: return 0x8D;
	case Arm64Shuffle::EXT4_UZP2_CABD: return 0xD2;
	case Arm64Shuffle::EXT8_ZIP1_ACBD: return 0xD8;
	case Arm64Shuffle::EXT8_ZIP2_CADB: return 0x72;
	case Arm64Shuffle::INS0_TO_1: return 0xE0;
	case Arm64Shuffle::INS0_TO_2: return 0xC4;
	case Arm64Shuffle::INS0_TO_3: return 0x24;
	case Arm64Shuffle::INS1_TO_0: return 0xE5;
	case Arm64Shuffle::INS1_TO_2: return 0xD4;
	case Arm64Shuffle::INS1_TO_3: return 0x64;
	case Arm64Shuffle::INS2_TO_0: return 0xE6;
	case Arm64Shuffle::INS2_TO_1: return 0xE8;
	case Arm64Shuffle::INS2_TO_3: return 0xA4;
	case Arm64Shuffle::INS3_TO_0: return 0xE7;
	case Arm64Shuffle::INS3_TO_1: return 0xEC;
	case Arm64Shuffle::INS3_TO_2: return 0xF4;
	case Arm64Shuffle::XTN2: return 0x84;
	case Arm64Shuffle::EXT12_ZIP1_ADBA: return 0x1C;
	case Arm64Shuffle::DUP3_UZP1_DDAC: return 0x8F;
	default:
		_assert_(false);
		return 0;
	}
}
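// The mask packs one 2-bit source-lane selector per output lane, lane i in bits [2i+1:2i].
// For example, 0xE4 == 0b11'10'01'00 selects lanes 3,2,1,0 for outputs 3,2,1,0 (the identity
// ABCD), and 0xA0 == 0b10'10'00'00 yields AACC (what TRN1 against itself produces).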

void Arm64ShuffleApply(ARM64FloatEmitter &fp, Arm64Shuffle method, ARM64Reg vd, ARM64Reg vs) {
	switch (method) {
	case Arm64Shuffle::DUP0_AAAA: fp.DUP(32, vd, vs, 0); return;
	case Arm64Shuffle::DUP1_BBBB: fp.DUP(32, vd, vs, 1); return;
	case Arm64Shuffle::DUP2_CCCC: fp.DUP(32, vd, vs, 2); return;
	case Arm64Shuffle::DUP3_DDDD: fp.DUP(32, vd, vs, 3); return;
	case Arm64Shuffle::MOV_ABCD: _assert_(vd != vs); fp.MOV(vd, vs); return;
	case Arm64Shuffle::TRN1_AACC: fp.TRN1(32, vd, vs, vs); return;
	case Arm64Shuffle::TRN2_BBDD: fp.TRN2(32, vd, vs, vs); return;
	case Arm64Shuffle::UZP1_ACAC: fp.UZP1(32, vd, vs, vs); return;
	case Arm64Shuffle::UZP2_BDBD: fp.UZP2(32, vd, vs, vs); return;
	case Arm64Shuffle::ZIP1_AABB: fp.ZIP1(32, vd, vs, vs); return;
	case Arm64Shuffle::ZIP2_CCDD: fp.ZIP2(32, vd, vs, vs); return;
	case Arm64Shuffle::REV64_BADC: fp.REV64(32, vd, vs); return;
	case Arm64Shuffle::EXT4_BCDA: fp.EXT(vd, vs, vs, 4); return;
	case Arm64Shuffle::EXT8_CDAB: fp.EXT(vd, vs, vs, 8); return;
	case Arm64Shuffle::EXT12_DABC: fp.EXT(vd, vs, vs, 12); return;

	case Arm64Shuffle::REV64_EXT8_CDBA:
		fp.REV64(32, EncodeRegToQuad(SCRATCHF1), vs);
		fp.EXT(vd, vs, EncodeRegToQuad(SCRATCHF1), 8);
		return;

	case Arm64Shuffle::REV64_EXT8_DCAB:
		fp.REV64(32, EncodeRegToQuad(SCRATCHF1), vs);
		fp.EXT(vd, EncodeRegToQuad(SCRATCHF1), vs, 8);
		return;

	case Arm64Shuffle::EXT4_UZP1_BDAC:
		fp.EXT(EncodeRegToQuad(SCRATCHF1), vs, vs, 4);
		fp.UZP1(32, vd, EncodeRegToQuad(SCRATCHF1), vs);
		return;

	case Arm64Shuffle::EXT4_UZP2_CABD:
		fp.EXT(EncodeRegToQuad(SCRATCHF1), vs, vs, 4);
		fp.UZP2(32, vd, EncodeRegToQuad(SCRATCHF1), vs);
		return;

	case Arm64Shuffle::EXT8_ZIP1_ACBD:
		fp.EXT(EncodeRegToQuad(SCRATCHF1), vs, vs, 8);
		fp.ZIP1(32, vd, vs, EncodeRegToQuad(SCRATCHF1));
		return;

	case Arm64Shuffle::EXT8_ZIP2_CADB:
		fp.EXT(EncodeRegToQuad(SCRATCHF1), vs, vs, 8);
		fp.ZIP2(32, vd, vs, EncodeRegToQuad(SCRATCHF1));
		return;

	case Arm64Shuffle::INS0_TO_1: fp.INS(32, vd, 1, vs, 0); return;
	case Arm64Shuffle::INS0_TO_2: fp.INS(32, vd, 2, vs, 0); return;
	case Arm64Shuffle::INS0_TO_3: fp.INS(32, vd, 3, vs, 0); return;
	case Arm64Shuffle::INS1_TO_0: fp.INS(32, vd, 0, vs, 1); return;
	case Arm64Shuffle::INS1_TO_2: fp.INS(32, vd, 2, vs, 1); return;
	case Arm64Shuffle::INS1_TO_3: fp.INS(32, vd, 3, vs, 1); return;
	case Arm64Shuffle::INS2_TO_0: fp.INS(32, vd, 0, vs, 2); return;
	case Arm64Shuffle::INS2_TO_1: fp.INS(32, vd, 1, vs, 2); return;
	case Arm64Shuffle::INS2_TO_3: fp.INS(32, vd, 3, vs, 2); return;
	case Arm64Shuffle::INS3_TO_0: fp.INS(32, vd, 0, vs, 3); return;
	case Arm64Shuffle::INS3_TO_1: fp.INS(32, vd, 1, vs, 3); return;
	case Arm64Shuffle::INS3_TO_2: fp.INS(32, vd, 2, vs, 3); return;

	case Arm64Shuffle::XTN2: fp.XTN2(32, vd, vs); return;

	case Arm64Shuffle::EXT12_ZIP1_ADBA:
		fp.EXT(EncodeRegToQuad(SCRATCHF1), vs, vs, 12);
		fp.ZIP1(32, vd, vs, EncodeRegToQuad(SCRATCHF1));
		return;

	case Arm64Shuffle::DUP3_UZP1_DDAC:
		fp.DUP(32, EncodeRegToQuad(SCRATCHF1), vs, 3);
		fp.UZP1(32, vd, EncodeRegToQuad(SCRATCHF1), vs);
		return;

	default:
		_assert_(false);
		return;
	}
}

uint8_t Arm64ShuffleResult(uint8_t mask, uint8_t prev) {
	if (prev == 0xE4)
		return mask;

	uint8_t result = 0;
	for (int i = 0; i < 4; ++i) {
		int takeLane = (mask >> (i * 2)) & 3;
		int lane = (prev >> (takeLane * 2)) & 3;
		result |= lane << (i * 2);
	}
	return result;
}
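// This composes two swizzles: the result of applying `mask` to a vector already shuffled by
// `prev`. For example, Arm64ShuffleResult(0x00 /* DUP0_AAAA */, 0x4E /* EXT8_CDAB */) == 0xAA,
// since broadcasting lane 0 of CDAB gives CCCC.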

int Arm64ShuffleScore(uint8_t shuf, uint8_t goal, int steps = 1) {
	if (shuf == goal)
		return 100;

	int score = 0;
	bool needs[4]{};
	bool gets[4]{};
	for (int i = 0; i < 4; ++i) {
		uint8_t mask = 3 << (i * 2);
		needs[(goal & mask) >> (i * 2)] = true;
		gets[(shuf & mask) >> (i * 2)] = true;
		if ((shuf & mask) == (goal & mask))
			score += 4;
	}

	for (int i = 0; i < 4; ++i) {
		if (needs[i] && !gets[i])
			return 0;
	}

	// We need to look one level deeper to solve some goals well, such as 0x1B (a common one).
	if (steps > 0) {
		int bestNextScore = 0;
		for (int m = 0; m < (int)Arm64Shuffle::COUNT_NORMAL; ++m) {
			uint8_t next = Arm64ShuffleResult(Arm64ShuffleMask((Arm64Shuffle)m), shuf);
			int nextScore = Arm64ShuffleScore(next, goal, steps - 1);
			if (nextScore > score) {
				bestNextScore = nextScore;
				if (bestNextScore == 100) {
					// Flatten the score so the earliest method that completes the goal in one more step wins - earlier methods are cheaper (a single instruction, not 2.)
					score = 0;
					break;
				}
			}
		}

		score += bestNextScore / 2;
	}

	return score;
}
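// Rough scoring semantics: 100 for an exact match; otherwise +4 per output lane already in
// place, 0 if some needed source lane is no longer present at all, plus half of the best
// score reachable with one more single-step shuffle.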

Arm64Shuffle Arm64BestShuffle(uint8_t goal, uint8_t prev, bool needsCopy) {
	// A couple special cases for optimal shuffles.
	if (goal == 0x7C && prev == 0xE4)
		return Arm64Shuffle::REV64_BADC;
	if (goal == 0x2B && prev == 0xE4)
		return Arm64Shuffle::EXT8_CDAB;
	if ((goal == 0x07 || goal == 0x1C) && prev == 0xE4)
		return Arm64Shuffle::EXT12_ZIP1_ADBA;
	if ((goal == 0x8F || goal == 0x2F) && prev == 0xE4)
		return Arm64Shuffle::DUP3_UZP1_DDAC;

	// needsCopy true means insert isn't possible.
	int attempts = needsCopy ? (int)Arm64Shuffle::COUNT_NOPREV : (int)Arm64Shuffle::COUNT_NORMAL;

	Arm64Shuffle best = Arm64Shuffle::MOV_ABCD;
	int bestScore = 0;
	for (int m = 0; m < attempts; ++m) {
		uint8_t result = Arm64ShuffleResult(Arm64ShuffleMask((Arm64Shuffle)m), prev);
		int score = Arm64ShuffleScore(result, goal);
		// Slightly discount options that involve an extra instruction.
		if (m >= (int)Arm64Shuffle::COUNT_SIMPLE && m < (int)Arm64Shuffle::COUNT_NOPREV)
			score--;
		if (score > bestScore) {
			best = (Arm64Shuffle)m;
			bestScore = score;
		}
	}

	_assert_(bestScore > 0);
	return best;
}

static void Arm64ShufflePerform(ARM64FloatEmitter &fp, ARM64Reg vd, ARM64Reg vs, u8 shuf) {
	// This performs all shuffles within 3 "steps" (some are two instructions, though.)
	_assert_msg_(shuf != 0xE4, "Non-shuffles shouldn't get here");

	uint8_t state = 0xE4;
	// If they're not the same, the first step needs to be a copy.
	bool needsCopy = vd != vs;
	for (int i = 0; i < 4 && state != shuf; ++i) {
		// Figure out the next step and write it out.
		Arm64Shuffle method = Arm64BestShuffle(shuf, state, needsCopy);
		Arm64ShuffleApply(fp, method, vd, needsCopy ? vs : vd);

		// Update our state to where we've ended up, for next time.
		needsCopy = false;
		state = Arm64ShuffleResult(Arm64ShuffleMask(method), state);
	}

	_assert_msg_(state == shuf, "Arm64ShufflePerform failed to resolve shuffle");
}
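// Illustrative traces: shuf == 0x00 (broadcast lane 0) resolves in a single DUP, since
// DUP0_AAAA is an exact match on the first iteration; shuf == 0xB1 likewise resolves in one
// REV64. Harder goals chain greedy steps, up to the loop limit of 4.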

void Arm64JitBackend::CompIR_VecAssign(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Vec4Init:
		regs_.Map(inst);
		switch (Vec4Init(inst.src1)) {
		case Vec4Init::AllZERO:
			fp_.MOVI(32, regs_.FQ(inst.dest), 0);
			break;

		case Vec4Init::AllONE:
		case Vec4Init::AllMinusONE:
			fp_.MOVI2FDUP(regs_.FQ(inst.dest), 1.0f, INVALID_REG, Vec4Init(inst.src1) == Vec4Init::AllMinusONE);
			break;

		case Vec4Init::Set_1000:
		case Vec4Init::Set_0100:
		case Vec4Init::Set_0010:
		case Vec4Init::Set_0001:
			fp_.MOVI(32, regs_.FQ(inst.dest), 0);
			fp_.MOVI2FDUP(EncodeRegToQuad(SCRATCHF1), 1.0f);
			fp_.INS(32, regs_.FQ(inst.dest), inst.src1 - (int)Vec4Init::Set_1000, EncodeRegToQuad(SCRATCHF1), inst.src1 - (int)Vec4Init::Set_1000);
			break;

		default:
			_assert_msg_(false, "Unexpected Vec4Init value %d", inst.src1);
			DISABLE;
		}
		break;

	case IROp::Vec4Shuffle:
		// There's not really an easy shuffle op on ARM64...
		if (regs_.GetFPRLaneCount(inst.src1) == 1 && (inst.src1 & 3) == 0 && inst.src2 == 0x00) {
			// This is a broadcast. If dest == src1, this won't clear it.
			regs_.SpillLockFPR(inst.src1);
			regs_.MapVec4(inst.dest, MIPSMap::NOINIT);
			fp_.DUP(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), 0);
		} else if (inst.src2 == 0xE4) {
			if (inst.dest != inst.src1) {
				regs_.Map(inst);
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			}
		} else {
			regs_.Map(inst);
			Arm64ShufflePerform(fp_, regs_.FQ(inst.dest), regs_.FQ(inst.src1), inst.src2);
		}
		break;

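	// For the Vec4Blend cases below, bit i of inst.constant selects where output lane i
	// comes from: 0 = src1, 1 = src2 (so 0b0000 copies src1 and 0b1111 copies src2).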
	case IROp::Vec4Blend:
		regs_.Map(inst);
		if (inst.src1 == inst.src2) {
			// Shouldn't really happen, just making sure the below doesn't have to think about it.
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			break;
		}

		// To reduce the overlap cases to consider, swap src1/src2 (and invert the blend mask) if dest == src2.
		// Thus, dest could be src1, but no other overlap is possible.
		if (inst.dest == inst.src2) {
			std::swap(inst.src1, inst.src2);
			inst.constant ^= 0xF;
		}

		switch (inst.constant & 0xF) {
		case 0b0000:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			break;

		case 0b0001:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(32, regs_.FQ(inst.dest), 0, regs_.FQ(inst.src2), 0);
			break;

		case 0b0010:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(32, regs_.FQ(inst.dest), 1, regs_.FQ(inst.src2), 1);
			break;

		case 0b0011:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(64, regs_.FQ(inst.dest), 0, regs_.FQ(inst.src2), 0);
			break;

		case 0b0100:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(32, regs_.FQ(inst.dest), 2, regs_.FQ(inst.src2), 2);
			break;

		case 0b0101:
			// To get AbCd: REV64 to BADC, then TRN2 xAxC, xbxd.
			fp_.REV64(32, EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src2));
			fp_.TRN2(32, regs_.FQ(inst.dest), EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1));
			break;

		case 0b0110:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(32, regs_.FQ(inst.dest), 1, regs_.FQ(inst.src2), 1);
			fp_.INS(32, regs_.FQ(inst.dest), 2, regs_.FQ(inst.src2), 2);
			break;

		case 0b0111:
			if (inst.dest != inst.src1) {
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 3, regs_.FQ(inst.src1), 3);
			} else {
				fp_.MOV(EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1));
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 3, EncodeRegToQuad(SCRATCHF1), 3);
			}
			break;

		case 0b1000:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(32, regs_.FQ(inst.dest), 3, regs_.FQ(inst.src2), 3);
			break;

		case 0b1001:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(32, regs_.FQ(inst.dest), 0, regs_.FQ(inst.src2), 0);
			fp_.INS(32, regs_.FQ(inst.dest), 3, regs_.FQ(inst.src2), 3);
			break;

		case 0b1010:
			// To get aBcD: REV64 to badc, then TRN2 xaxc, xBxD.
			fp_.REV64(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.TRN2(32, regs_.FQ(inst.dest), regs_.FQ(inst.dest), regs_.FQ(inst.src2));
			break;

		case 0b1011:
			if (inst.dest != inst.src1) {
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 2, regs_.FQ(inst.src1), 2);
			} else {
				fp_.MOV(EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1));
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 2, EncodeRegToQuad(SCRATCHF1), 2);
			}
			break;

		case 0b1100:
			if (inst.dest != inst.src1)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
			fp_.INS(64, regs_.FQ(inst.dest), 1, regs_.FQ(inst.src2), 1);
			break;

		case 0b1101:
			if (inst.dest != inst.src1) {
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 1, regs_.FQ(inst.src1), 1);
			} else {
				fp_.MOV(EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1));
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 1, EncodeRegToQuad(SCRATCHF1), 1);
			}
			break;

		case 0b1110:
			if (inst.dest != inst.src1) {
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 0, regs_.FQ(inst.src1), 0);
			} else {
				fp_.MOV(EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1));
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
				fp_.INS(32, regs_.FQ(inst.dest), 0, EncodeRegToQuad(SCRATCHF1), 0);
			}
			break;

		case 0b1111:
			if (inst.dest != inst.src2)
				fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src2));
			break;
		}
		break;

	case IROp::Vec4Mov:
		if (inst.dest != inst.src1) {
			regs_.Map(inst);
			fp_.MOV(regs_.FQ(inst.dest), regs_.FQ(inst.src1));
		}
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_VecClamp(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Vec4ClampToZero:
		regs_.Map(inst);
		fp_.MOVI(32, EncodeRegToQuad(SCRATCHF1), 0);
		fp_.SMAX(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), EncodeRegToQuad(SCRATCHF1));
		break;

	case IROp::Vec2ClampToZero:
		regs_.Map(inst);
		fp_.MOVI(32, EncodeRegToDouble(SCRATCHF1), 0);
		fp_.SMAX(32, regs_.FD(inst.dest), regs_.FD(inst.src1), EncodeRegToDouble(SCRATCHF1));
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_VecHoriz(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Vec4Dot:
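		// Strategy: multiply lane-wise, then two pairwise FADDPs fold the four products
		// into their total (replicated across lanes), which lands in the destination lane.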
		if (Overlap(inst.dest, 1, inst.src1, 4) || Overlap(inst.dest, 1, inst.src2, 4)) {
			// To avoid overlap problems, map a little carefully.
			regs_.SpillLockFPR(inst.src1, inst.src2);
			regs_.MapVec4(inst.src1);
			regs_.MapVec4(inst.src2);
			regs_.MapVec4(inst.dest & ~3, MIPSMap::DIRTY);
			fp_.FMUL(32, EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1), regs_.FQ(inst.src2));
			fp_.FADDP(32, EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
			fp_.FADDP(32, EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
			fp_.INS(32, regs_.FQ(inst.dest & ~3), inst.dest & 3, EncodeRegToQuad(SCRATCHF1), 0);
		} else {
			regs_.Map(inst);
			fp_.FMUL(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), regs_.FQ(inst.src2));
			fp_.FADDP(32, regs_.FQ(inst.dest), regs_.FQ(inst.dest), regs_.FQ(inst.dest));
			fp_.FADDP(32, regs_.FQ(inst.dest), regs_.FQ(inst.dest), regs_.FQ(inst.dest));
		}
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_VecPack(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Vec4DuplicateUpperBitsAndShift1:
		// This operation swizzles the high 8 bits and converts to a signed int.
		// It's always after Vec4Unpack8To32.
		// 000A000B000C000D -> AAAABBBBCCCCDDDD and then shift right one (to match INT_MAX.)
		regs_.Map(inst);
		// First, USHR+ORR to get 0A0A0B0B0C0C0D0D.
		fp_.USHR(32, EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1), 16);
		fp_.ORR(EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1));
		// Now again, but by 8.
		fp_.USHR(32, regs_.FQ(inst.dest), EncodeRegToQuad(SCRATCHF1), 8);
		fp_.ORR(regs_.FQ(inst.dest), regs_.FQ(inst.dest), EncodeRegToQuad(SCRATCHF1));
		// Finally, shift away the sign. The goal is to saturate 0xFF -> 0x7FFFFFFF.
		fp_.USHR(32, regs_.FQ(inst.dest), regs_.FQ(inst.dest), 1);
		break;

	case IROp::Vec2Pack31To16:
		// Same as Vec2Pack32To16, but we shift left 1 first to nuke the sign bit.
		if (Overlap(inst.dest, 1, inst.src1, 2)) {
			regs_.MapVec2(inst.src1, MIPSMap::DIRTY);
			fp_.SHL(32, EncodeRegToDouble(SCRATCHF1), regs_.FD(inst.src1), 1);
			fp_.UZP2(16, EncodeRegToDouble(SCRATCHF1), EncodeRegToDouble(SCRATCHF1), EncodeRegToDouble(SCRATCHF1));
			fp_.INS(32, regs_.FD(inst.dest & ~1), inst.dest & 1, EncodeRegToDouble(SCRATCHF1), 0);
		} else {
			regs_.Map(inst);
			fp_.SHL(32, regs_.FD(inst.dest), regs_.FD(inst.src1), 1);
			fp_.UZP2(16, regs_.FD(inst.dest), regs_.FD(inst.dest), regs_.FD(inst.dest));
		}
		break;

	case IROp::Vec2Pack32To16:
		// Viewed as 16 bit lanes: xAxB -> AB00... that's UZP2.
		if (Overlap(inst.dest, 1, inst.src1, 2)) {
			regs_.MapVec2(inst.src1, MIPSMap::DIRTY);
			fp_.UZP2(16, EncodeRegToDouble(SCRATCHF1), regs_.FD(inst.src1), regs_.FD(inst.src1));
			fp_.INS(32, regs_.FD(inst.dest & ~1), inst.dest & 1, EncodeRegToDouble(SCRATCHF1), 0);
		} else {
			regs_.Map(inst);
			fp_.UZP2(16, regs_.FD(inst.dest), regs_.FD(inst.src1), regs_.FD(inst.src1));
		}
		break;

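	// For the two Pack*To8 cases below: a logical right shift by 23 (31-bit inputs) or 24
	// (32-bit inputs) leaves the 8 bits we want to keep in the low byte of each 32-bit lane;
	// two UZP1 passes then gather those bytes together.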
	case IROp::Vec4Pack31To8:
		if (Overlap(inst.dest, 1, inst.src1, 4)) {
			regs_.MapVec4(inst.src1, MIPSMap::DIRTY);
		} else {
			regs_.Map(inst);
		}

		// Viewed as 8-bit lanes, after a shift by 23: AxxxBxxxCxxxDxxx.
		// So: UZP1 -> AxBxCxDx -> UZP1 again -> ABCD
		fp_.USHR(32, EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1), 23);
		fp_.UZP1(8, EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
		// Second one directly to dest, if we can.
		if (Overlap(inst.dest, 1, inst.src1, 4)) {
			fp_.UZP1(8, EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
			fp_.INS(32, regs_.FQ(inst.dest & ~3), inst.dest & 3, EncodeRegToQuad(SCRATCHF1), 0);
		} else {
			fp_.UZP1(8, regs_.FQ(inst.dest), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
		}
		break;

	case IROp::Vec4Pack32To8:
		if (Overlap(inst.dest, 1, inst.src1, 4)) {
			regs_.MapVec4(inst.src1, MIPSMap::DIRTY);
		} else {
			regs_.Map(inst);
		}

		// Viewed as 8-bit lanes, after a shift by 24: AxxxBxxxCxxxDxxx.
		// Same as Vec4Pack31To8, just a different shift.
		fp_.USHR(32, EncodeRegToQuad(SCRATCHF1), regs_.FQ(inst.src1), 24);
		fp_.UZP1(8, EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
		// Second one directly to dest, if we can.
		if (Overlap(inst.dest, 1, inst.src1, 4)) {
			fp_.UZP1(8, EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
			fp_.INS(32, regs_.FQ(inst.dest & ~3), inst.dest & 3, EncodeRegToQuad(SCRATCHF1), 0);
		} else {
			fp_.UZP1(8, regs_.FQ(inst.dest), EncodeRegToQuad(SCRATCHF1), EncodeRegToQuad(SCRATCHF1));
		}
		break;

	case IROp::Vec2Unpack16To31:
		// Viewed as 16-bit: ABxx -> 0A0B, then shift a zero into the sign place.
		if (Overlap(inst.dest, 2, inst.src1, 1)) {
			regs_.MapVec2(inst.dest, MIPSMap::DIRTY);
		} else {
			regs_.Map(inst);
		}
		if (inst.src1 == inst.dest + 1) {
			fp_.USHLL2(16, regs_.FQ(inst.dest), regs_.FD(inst.src1), 15);
		} else {
			fp_.USHLL(16, regs_.FQ(inst.dest), regs_.FD(inst.src1), 15);
		}
		break;

	case IROp::Vec2Unpack16To32:
		// Just Vec2Unpack16To31, without the shift.
		if (Overlap(inst.dest, 2, inst.src1, 1)) {
			regs_.MapVec2(inst.dest, MIPSMap::DIRTY);
		} else {
			regs_.Map(inst);
		}
		if (inst.src1 == inst.dest + 1) {
			fp_.SHLL2(16, regs_.FQ(inst.dest), regs_.FD(inst.src1));
		} else {
			fp_.SHLL(16, regs_.FQ(inst.dest), regs_.FD(inst.src1));
		}
		break;

	case IROp::Vec4Unpack8To32:
		// Viewed as 8-bit: ABCD -> 000A000B000C000D.
		if (Overlap(inst.dest, 4, inst.src1, 1)) {
			regs_.MapVec4(inst.dest, MIPSMap::DIRTY);
			if (inst.dest == inst.src1 + 2) {
				fp_.SHLL2(8, regs_.FQ(inst.dest), regs_.FD(inst.src1 & ~3));
			} else if (inst.dest != inst.src1) {
				fp_.DUP(32, regs_.FQ(inst.dest), regs_.FQ(inst.src1), inst.src1 & 3);
				fp_.SHLL(8, regs_.FQ(inst.dest), regs_.FD(inst.dest));
			} else {
				fp_.SHLL(8, regs_.FQ(inst.dest), regs_.FD(inst.src1));
			}
			fp_.SHLL(16, regs_.FQ(inst.dest), regs_.FD(inst.dest));
		} else {
			regs_.Map(inst);
			// Two steps: ABCD -> 0A0B0C0D, then to 000A000B000C000D.
			fp_.SHLL(8, regs_.FQ(inst.dest), regs_.FD(inst.src1));
			fp_.SHLL(16, regs_.FQ(inst.dest), regs_.FD(inst.dest));
		}
		break;

	default:
		INVALIDOP;
		break;
	}
}

} // namespace MIPSComp

#endif