GitHub Repository: hrydgard/ppsspp
Path: blob/master/Common/Arm64Emitter.cpp
// Copyright 2013 Dolphin Emulator Project
// Licensed under GPLv2
// Refer to the license.txt file included.

#include "ppsspp_config.h"

#include <limits>
#include <vector>
#include <cmath>
#include <cinttypes>

#include <cstdlib>
#include <cstring>

#include "Common/Arm64Emitter.h"
#include "Common/Math/math_util.h"
#include "Common/CommonTypes.h"
#include "Common/CommonWindows.h"
#include "Common/CPUDetect.h"
#include "Common/Log.h"

#if PPSSPP_PLATFORM(IOS) || PPSSPP_PLATFORM(MAC)
#include <libkern/OSCacheControl.h>
#endif

namespace Arm64Gen
{

const int kWRegSizeInBits = 32;
const int kXRegSizeInBits = 64;

// The below few functions are taken from V8.
int CountLeadingZeros(uint64_t value, int width) {
	// TODO(jbramley): Optimize this for ARM64 hosts.
	int count = 0;
	uint64_t bit_test = 1ULL << (width - 1);
	while ((count < width) && ((bit_test & value) == 0)) {
		count++;
		bit_test >>= 1;
	}
	return count;
}
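
// A minimal sketch of the optimization the TODO above hints at, assuming a
// GCC/Clang-style compiler (illustrative only, not part of the original file;
// __builtin_clzll is undefined for 0, so that case is handled explicitly):
//
//   int CountLeadingZerosFast(uint64_t value, int width) {
//     if (value == 0)
//       return width;
//     return __builtin_clzll(value) - (64 - width);
//   }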

uint64_t LargestPowerOf2Divisor(uint64_t value) {
	return value & -(int64_t)value;
}

bool IsPowerOfTwo(uint64_t x) {
	return (x != 0) && ((x & (x - 1)) == 0);
}

#define V8_UINT64_C(x) ((uint64_t)(x))

bool IsImmArithmetic(uint64_t input, u32 *val, bool *shift) {
	if (input < 4096) {
		if (val) *val = (uint32_t)input;
		if (shift) *shift = false;
		return true;
	} else if ((input & 0xFFF000) == input) {
		if (val) *val = (uint32_t)(input >> 12);
		if (shift) *shift = true;
		return true;
	}
	return false;
}
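
// Worked example (illustrative, not from the original source): 0x5000 is
// 5 << 12, so IsImmArithmetic(0x5000, &val, &shift) returns true with val = 5
// and shift = true (ADD/SUB's "LSL #12" immediate form), while 0x5001 fits
// neither the plain 12-bit form nor the shifted form and returns false.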

bool IsImmLogical(uint64_t value, unsigned int width, unsigned int *n, unsigned int *imm_s, unsigned int *imm_r) {
	//DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
	// DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));

	bool negate = false;

	// Logical immediates are encoded using parameters n, imm_s and imm_r using
	// the following table:
	//
	//    N   imms    immr    size        S             R
	//    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
	//    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
	//    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
	//    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
	//    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
	//    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
	//    (s bits must not be all set)
	//
	// A pattern is constructed of size bits, where the least significant S+1 bits
	// are set. The pattern is rotated right by R, and repeated across a 32 or
	// 64-bit value, depending on destination register width.
	//
	// Put another way: the basic format of a logical immediate is a single
	// contiguous stretch of 1 bits, repeated across the whole word at intervals
	// given by a power of 2. To identify them quickly, we first locate the
	// lowest stretch of 1 bits, then the next 1 bit above that; that combination
	// is different for every logical immediate, so it gives us all the
	// information we need to identify the only logical immediate that our input
	// could be, and then we simply check if that's the value we actually have.
	//
	// (The rotation parameter does give the possibility of the stretch of 1 bits
	// going 'round the end' of the word. To deal with that, we observe that in
	// any situation where that happens the bitwise NOT of the value is also a
	// valid logical immediate. So we simply invert the input whenever its low bit
	// is set, and then we know that the rotated case can't arise.)

	if (value & 1) {
		// If the low bit is 1, negate the value, and set a flag to remember that we
		// did (so that we can adjust the return values appropriately).
		negate = true;
		value = ~value;
	}

	if (width == kWRegSizeInBits) {
		// To handle 32-bit logical immediates, the very easiest thing is to repeat
		// the input value twice to make a 64-bit word. The correct encoding of that
		// as a logical immediate will also be the correct encoding of the 32-bit
		// value.

		// The most-significant 32 bits may not be zero (ie. negate is true) so
		// shift the value left before duplicating it.
		value <<= kWRegSizeInBits;
		value |= value >> kWRegSizeInBits;
	}

	// The basic analysis idea: imagine our input word looks like this.
	//
	//    0011111000111110001111100011111000111110001111100011111000111110
	//                                                          c  b    a
	//                                                          |<--d-->|
	//
	// We find the lowest set bit (as an actual power-of-2 value, not its index)
	// and call it a. Then we add a to our original number, which wipes out the
	// bottommost stretch of set bits and replaces it with a 1 carried into the
	// next zero bit. Then we look for the new lowest set bit, which is in
	// position b, and subtract it, so now our number is just like the original
	// but with the lowest stretch of set bits completely gone. Now we find the
	// lowest set bit again, which is position c in the diagram above. Then we'll
	// measure the distance d between bit positions a and c (using CLZ), and that
	// tells us that the only valid logical immediate that could possibly be equal
	// to this number is the one in which a stretch of bits running from a to just
	// below b is replicated every d bits.
	uint64_t a = LargestPowerOf2Divisor(value);
	uint64_t value_plus_a = value + a;
	uint64_t b = LargestPowerOf2Divisor(value_plus_a);
	uint64_t value_plus_a_minus_b = value_plus_a - b;
	uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);

	int d, clz_a, out_n;
	uint64_t mask;

	if (c != 0) {
		// The general case, in which there is more than one stretch of set bits.
		// Compute the repeat distance d, and set up a bitmask covering the basic
		// unit of repetition (i.e. a word with the bottom d bits set). Also, in all
		// of these cases the N bit of the output will be zero.
		clz_a = CountLeadingZeros(a, kXRegSizeInBits);
		int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
		d = clz_a - clz_c;
		mask = ((V8_UINT64_C(1) << d) - 1);
		out_n = 0;
	} else {
		// Handle degenerate cases.
		//
		// If any of those 'find lowest set bit' operations didn't find a set bit at
		// all, then the word will have been zero thereafter, so in particular the
		// last lowest_set_bit operation will have returned zero. So we can test for
		// all the special case conditions in one go by seeing if c is zero.
		if (a == 0) {
			// The input was zero (or all 1 bits, which will come to here too after we
			// inverted it at the start of the function), for which we just return
			// false.
			return false;
		} else {
			// Otherwise, if c was zero but a was not, then there's just one stretch
			// of set bits in our word, meaning that we have the trivial case of
			// d == 64 and only one 'repetition'. Set up all the same variables as in
			// the general case above, and set the N bit in the output.
			clz_a = CountLeadingZeros(a, kXRegSizeInBits);
			d = 64;
			mask = ~V8_UINT64_C(0);
			out_n = 1;
		}
	}

	// If the repeat period d is not a power of two, it can't be encoded.
	if (!IsPowerOfTwo(d)) {
		return false;
	}

	if (((b - a) & ~mask) != 0) {
		// If the bit stretch (b - a) does not fit within the mask derived from the
		// repeat period, then fail.
		return false;
	}

	// The only possible option is b - a repeated every d bits. Now we're going to
	// actually construct the valid logical immediate derived from that
	// specification, and see if it equals our original input.
	//
	// To repeat a value every d bits, we multiply it by a number of the form
	// (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
	// be derived using a table lookup on CLZ(d).
	static const uint64_t multipliers[] = {
		0x0000000000000001UL,
		0x0000000100000001UL,
		0x0001000100010001UL,
		0x0101010101010101UL,
		0x1111111111111111UL,
		0x5555555555555555UL,
	};
	int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
	// Ensure that the index to the multipliers array is within bounds.
	_dbg_assert_((multiplier_idx >= 0) &&
		(static_cast<size_t>(multiplier_idx) < ARRAY_SIZE(multipliers)));
	uint64_t multiplier = multipliers[multiplier_idx];
	uint64_t candidate = (b - a) * multiplier;

	if (value != candidate) {
		// The candidate pattern doesn't match our input value, so fail.
		return false;
	}

	// We have a match! This is a valid logical immediate, so now we have to
	// construct the bits and pieces of the instruction encoding that generates
	// it.

	// Count the set bits in our basic stretch. The special case of clz(0) == -1
	// makes the answer come out right for stretches that reach the very top of
	// the word (e.g. numbers like 0xffffc00000000000).
	int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
	int s = clz_a - clz_b;

	// Decide how many bits to rotate right by, to put the low bit of that basic
	// stretch in position a.
	int r;
	if (negate) {
		// If we inverted the input right at the start of this function, here's
		// where we compensate: the number of set bits becomes the number of clear
		// bits, and the rotation count is based on position b rather than position
		// a (since b is the location of the 'lowest' 1 bit after inversion).
		s = d - s;
		r = (clz_b + 1) & (d - 1);
	} else {
		r = (clz_a + 1) & (d - 1);
	}

	// Now we're done, except for having to encode the S output in such a way that
	// it gives both the number of set bits and the length of the repeated
	// segment. The s field is encoded like this:
	//
	//     imms    size        S
	//    ssssss    64    UInt(ssssss)
	//    0sssss    32    UInt(sssss)
	//    10ssss    16    UInt(ssss)
	//    110sss     8    UInt(sss)
	//    1110ss     4    UInt(ss)
	//    11110s     2    UInt(s)
	//
	// So we 'or' (-d << 1) with our computed s to form imms.
	*n = out_n;
	*imm_s = ((-d << 1) | (s - 1)) & 0x3f;
	*imm_r = r;

	return true;
}
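
// Worked example (illustrative, not from the original source): for
// value = 0x5555555555555555 with width = 64, the low bit is set, so the input
// is inverted to 0xAAAAAAAAAAAAAAAA; then a = 2, b = 4, d = 2, and multiplying
// (b - a) = 2 by the 0x5555555555555555 multiplier reproduces the inverted
// input, so the function succeeds with *n = 0, *imm_s = 0x3C and *imm_r = 0.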

static int EncodeSize(int size) {
	switch (size) {
	case 8: return 0;
	case 16: return 1;
	case 32: return 2;
	case 64: return 3;
	default: return 0;
	}
}

ARM64XEmitter::ARM64XEmitter(const u8 *ptr, u8 *writePtr) {
	SetCodePointer(ptr, writePtr);
}

void ARM64XEmitter::SetCodePointer(const u8 *ptr, u8 *writePtr)
{
	m_code = ptr;
	m_writable = writePtr;
	m_lastCacheFlushEnd = ptr;
}

const u8* ARM64XEmitter::GetCodePointer() const
{
	return m_code;
}

u8* ARM64XEmitter::GetWritableCodePtr()
{
	return m_writable;
}
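
// Note on the two pointers (explanatory aside, not in the original file):
// m_code is the address the generated code will execute from, while m_writable
// is the address it is written through. Under W^X JIT restrictions these can
// be two different mappings of the same memory; SetJumpTarget() below derives
// the writable alias from their difference before patching an instruction.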

void ARM64XEmitter::ReserveCodeSpace(u32 bytes)
{
	for (u32 i = 0; i < bytes/4; i++)
		BRK(0);
}

const u8* ARM64XEmitter::AlignCode16()
{
	int c = int((u64)m_code & 15);
	if (c)
		ReserveCodeSpace(16 - c);
	return m_code;
}

const u8* ARM64XEmitter::AlignCodePage()
{
	int page_size = GetMemoryProtectPageSize();
	int c = int((u64)m_code & (page_size - 1));
	if (c)
		ReserveCodeSpace(page_size - c);
	return m_code;
}

const u8 *ARM64XEmitter::NopAlignCode16() {
	int bytes = ((-(intptr_t)m_code) & 15);
	for (int i = 0; i < bytes / 4; i++) {
		Write32(0xD503201F); // official nop instruction
	}
	return m_code;
}

void ARM64XEmitter::FlushIcache()
{
	FlushIcacheSection(m_lastCacheFlushEnd, m_code);
	m_lastCacheFlushEnd = m_code;
}

void ARM64XEmitter::FlushIcacheSection(const u8 *start, const u8 *end)
{
#if PPSSPP_PLATFORM(IOS) || PPSSPP_PLATFORM(MAC)
	// Header file says this is equivalent to: sys_icache_invalidate(start, end - start);
	sys_cache_control(kCacheFunctionPrepareForExecution, (void *)start, end - start);
#elif PPSSPP_PLATFORM(WINDOWS)
	FlushInstructionCache(GetCurrentProcess(), start, end - start);
#elif PPSSPP_ARCH(ARM64)
	// Code from Dolphin, contributed by the Mono project.

	size_t isize, dsize;
	if (cpu_info.sQuirks.bExynos8890DifferingCachelineSizes) {
		// Don't rely on GCC's __clear_cache implementation, as it caches
		// icache/dcache cache line sizes, that can vary between cores on
		// very buggy big.LITTLE architectures like Exynos8890D.
		// Enforce the minimum cache line size to be completely safe on these CPUs.
		isize = 64;
		dsize = 64;
	} else {
		u64 ctr_el0;
		static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff;
		__asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
		isize = 4 << ((ctr_el0 >> 0) & 0xf);
		dsize = 4 << ((ctr_el0 >> 16) & 0xf);

		// use the global minimum cache line size
		icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize;
		dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize;
	}

	u64 addr = (u64)start & ~(u64)(dsize - 1);
	for (; addr < (u64)end; addr += dsize)
		// use "civac" instead of "cvau", as this is the suggested workaround for
		// Cortex-A53 errata 819472, 826319, 827319 and 824069.
		__asm__ volatile("dc civac, %0" : : "r"(addr) : "memory");
	__asm__ volatile("dsb ish" : : : "memory");

	addr = (u64)start & ~(u64)(isize - 1);
	for (; addr < (u64)end; addr += isize)
		__asm__ volatile("ic ivau, %0" : : "r"(addr) : "memory");

	__asm__ volatile("dsb ish" : : : "memory");
	__asm__ volatile("isb" : : : "memory");
#endif
}
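
// Typical usage (a sketch, not from the original source): emit code, then
// flush once before executing it, so the data cache is cleaned and the stale
// instruction cache lines are invalidated:
//
//   emitter.SetCodePointer(execPtr, writePtr);
//   ... emit instructions ...
//   emitter.FlushIcache();  // must complete before jumping to execPtr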

// Exception generation
static const u32 ExcEnc[][3] = {
	{0, 0, 1}, // SVC
	{0, 0, 2}, // HVC
	{0, 0, 3}, // SMC
	{1, 0, 0}, // BRK
	{2, 0, 0}, // HLT
	{5, 0, 1}, // DCPS1
	{5, 0, 2}, // DCPS2
	{5, 0, 3}, // DCPS3
};

// Arithmetic generation
static const u32 ArithEnc[] = {
	0x058, // ADD
	0x258, // SUB
};

// Conditional Select
static const u32 CondSelectEnc[][2] = {
	{0, 0}, // CSEL
	{0, 1}, // CSINC
	{1, 0}, // CSINV
	{1, 1}, // CSNEG
};

// Data-Processing (1 source)
static const u32 Data1SrcEnc[][2] = {
	{0, 0}, // RBIT
	{0, 1}, // REV16
	{0, 2}, // REV32
	{0, 3}, // REV64
	{0, 4}, // CLZ
	{0, 5}, // CLS
};

// Data-Processing (2 source)
static const u32 Data2SrcEnc[] = {
	0x02, // UDIV
	0x03, // SDIV
	0x08, // LSLV
	0x09, // LSRV
	0x0A, // ASRV
	0x0B, // RORV
	0x10, // CRC32B
	0x11, // CRC32H
	0x12, // CRC32W
	0x14, // CRC32CB
	0x15, // CRC32CH
	0x16, // CRC32CW
	0x13, // CRC32X (64bit Only)
	0x17, // CRC32CX (64bit Only)
};

// Data-Processing (3 source)
static const u32 Data3SrcEnc[][2] = {
	{0, 0}, // MADD
	{0, 1}, // MSUB
	{1, 0}, // SMADDL (64Bit Only)
	{1, 1}, // SMSUBL (64Bit Only)
	{2, 0}, // SMULH (64Bit Only)
	{5, 0}, // UMADDL (64Bit Only)
	{5, 1}, // UMSUBL (64Bit Only)
	{6, 0}, // UMULH (64Bit Only)
};

// Logical (shifted register)
static const u32 LogicalEnc[][2] = {
	{0, 0}, // AND
	{0, 1}, // BIC
	{1, 0}, // ORR
	{1, 1}, // ORN
	{2, 0}, // EOR
	{2, 1}, // EON
	{3, 0}, // ANDS
	{3, 1}, // BICS
};

// Load/Store Exclusive
static const u32 LoadStoreExcEnc[][5] = {
	{0, 0, 0, 0, 0}, // STXRB
	{0, 0, 0, 0, 1}, // STLXRB
	{0, 0, 1, 0, 0}, // LDXRB
	{0, 0, 1, 0, 1}, // LDAXRB
	{0, 1, 0, 0, 1}, // STLRB
	{0, 1, 1, 0, 1}, // LDARB
	{1, 0, 0, 0, 0}, // STXRH
	{1, 0, 0, 0, 1}, // STLXRH
	{1, 0, 1, 0, 0}, // LDXRH
	{1, 0, 1, 0, 1}, // LDAXRH
	{1, 1, 0, 0, 1}, // STLRH
	{1, 1, 1, 0, 1}, // LDARH
	{2, 0, 0, 0, 0}, // STXR
	{3, 0, 0, 0, 0}, // (64bit) STXR
	{2, 0, 0, 0, 1}, // STLXR
	{3, 0, 0, 0, 1}, // (64bit) STLXR
	{2, 0, 0, 1, 0}, // STXP
	{3, 0, 0, 1, 0}, // (64bit) STXP
	{2, 0, 0, 1, 1}, // STLXP
	{3, 0, 0, 1, 1}, // (64bit) STLXP
	{2, 0, 1, 0, 0}, // LDXR
	{3, 0, 1, 0, 0}, // (64bit) LDXR
	{2, 0, 1, 0, 1}, // LDAXR
	{3, 0, 1, 0, 1}, // (64bit) LDAXR
	{2, 0, 1, 1, 0}, // LDXP
	{3, 0, 1, 1, 0}, // (64bit) LDXP
	{2, 0, 1, 1, 1}, // LDAXP
	{3, 0, 1, 1, 1}, // (64bit) LDAXP
	{2, 1, 0, 0, 1}, // STLR
	{3, 1, 0, 0, 1}, // (64bit) STLR
	{2, 1, 1, 0, 1}, // LDAR
	{3, 1, 1, 0, 1}, // (64bit) LDAR
};

void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr)
{
	bool b64Bit = Is64Bit(Rt);
	s64 distance = (s64)ptr - (s64)m_code;

	_assert_msg_(!(distance & 0x3), "%s: distance must be a multiple of 4: %llx", __FUNCTION__, distance);

	distance >>= 2;

	_assert_msg_(distance >= -0x40000 && distance <= 0x3FFFF, "%s: Received too large distance: %llx", __FUNCTION__, distance);

	Rt = DecodeReg(Rt);
	Write32((b64Bit << 31) | (0x34 << 24) | (op << 24) | \
	        (((u32)distance << 5) & 0xFFFFE0) | Rt);
}

void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr)
{
	bool b64Bit = Is64Bit(Rt);
	s64 distance = (s64)ptr - (s64)m_code;

	_assert_msg_(!(distance & 0x3), "%s: distance must be a multiple of 4: %llx", __FUNCTION__, distance);

	distance >>= 2;

	_assert_msg_(distance >= -0x2000 && distance <= 0x1FFF, "%s: Received too large distance: %llx", __FUNCTION__, distance);

	Rt = DecodeReg(Rt);
	Write32((b64Bit << 31) | (0x36 << 24) | (op << 24) | \
	        (bits << 19) | (((u32)distance << 5) & 0x7FFE0) | Rt);
}

void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr)
{
	s64 distance = (s64)ptr - s64(m_code);

	_assert_msg_(!(distance & 0x3), "%s: distance must be a multiple of 4: %llx", __FUNCTION__, distance);

	distance >>= 2;

	_assert_msg_(distance >= -0x2000000LL && distance <= 0x1FFFFFFLL, "%s: Received too large distance: %llx", __FUNCTION__, distance);

	Write32((op << 31) | (0x5 << 26) | (distance & 0x3FFFFFF));
}

void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 opc, u32 op2, u32 op3, u32 op4, ARM64Reg Rn)
{
	Rn = DecodeReg(Rn);
	Write32((0x6B << 25) | (opc << 21) | (op2 << 16) | (op3 << 10) | (Rn << 5) | op4);
}

void ARM64XEmitter::EncodeExceptionInst(u32 instenc, u32 imm)
{
	_assert_msg_(!(imm & ~0xFFFF), "%s: Exception instruction too large immediate: %d", __FUNCTION__, imm);

	Write32((0xD4 << 24) | (ExcEnc[instenc][0] << 21) | (imm << 5) | (ExcEnc[instenc][1] << 2) | ExcEnc[instenc][2]);
}

void ARM64XEmitter::EncodeSystemInst(u32 op0, u32 op1, u32 CRn, u32 CRm, u32 op2, ARM64Reg Rt)
{
	Write32((0x354 << 22) | (op0 << 19) | (op1 << 16) | (CRn << 12) | (CRm << 8) | (op2 << 5) | Rt);
}

void ARM64XEmitter::EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);
	Write32((b64Bit << 31) | (flags << 29) | (ArithEnc[instenc] << 21) | \
	        (Option.GetType() == ArithOption::TYPE_EXTENDEDREG ? (1 << 21) : 0) | (Rm << 16) | Option.GetData() | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rm = DecodeReg(Rm);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (op << 30) | (flags << 29) | \
	        (0xD0 << 21) | (Rm << 16) | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond)
{
	bool b64Bit = Is64Bit(Rn);

	_assert_msg_(!(imm & ~0x1F), "%s: too large immediate: %d", __FUNCTION__, imm);
	_assert_msg_(!(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv);

	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | \
	        (imm << 16) | (cond << 12) | (1 << 11) | (Rn << 5) | nzcv);
}

void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond)
{
	bool b64Bit = Is64Bit(Rm);

	_assert_msg_(!(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv);

	Rm = DecodeReg(Rm);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | \
	        (Rm << 16) | (cond << 12) | (Rn << 5) | nzcv);
}

void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rm = DecodeReg(Rm);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (CondSelectEnc[instenc][0] << 30) | \
	        (0xD4 << 21) | (Rm << 16) | (cond << 12) | (CondSelectEnc[instenc][1] << 10) | \
	        (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (0x2D6 << 21) | \
	        (Data1SrcEnc[instenc][0] << 16) | (Data1SrcEnc[instenc][1] << 10) | \
	        (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rm = DecodeReg(Rm);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (0x0D6 << 21) | \
	        (Rm << 16) | (Data2SrcEnc[instenc] << 10) | \
	        (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rm = DecodeReg(Rm);
	Rn = DecodeReg(Rn);
	Ra = DecodeReg(Ra);
	Write32((b64Bit << 31) | (0xD8 << 21) | (Data3SrcEnc[instenc][0] << 21) | \
	        (Rm << 16) | (Data3SrcEnc[instenc][1] << 15) | \
	        (Ra << 10) | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rm = DecodeReg(Rm);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (LogicalEnc[instenc][0] << 29) | (0x5 << 25) | (LogicalEnc[instenc][1] << 21) | \
	        Shift.GetData() | (Rm << 16) | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm)
{
	bool b64Bit = Is64Bit(Rt);
	bool bVec = IsVector(Rt);

	_assert_msg_(!(imm & 0xFFFFF), "%s: offset too large %d", __FUNCTION__, imm);

	Rt = DecodeReg(Rt);
	if (b64Bit && bitop != 0x2) // LDRSW(0x2) uses 64bit reg, doesn't have 64bit bit set
		bitop |= 0x1;
	Write32((bitop << 30) | (bVec << 26) | (0x18 << 24) | (imm << 5) | Rt);
}

void ARM64XEmitter::EncodeLoadStoreExcInst(u32 instenc,
		ARM64Reg Rs, ARM64Reg Rt2, ARM64Reg Rn, ARM64Reg Rt)
{
	Rs = DecodeReg(Rs);
	Rt2 = DecodeReg(Rt2);
	Rn = DecodeReg(Rn);
	Rt = DecodeReg(Rt);
	Write32((LoadStoreExcEnc[instenc][0] << 30) | (0x8 << 24) | (LoadStoreExcEnc[instenc][1] << 23) | \
	        (LoadStoreExcEnc[instenc][2] << 22) | (LoadStoreExcEnc[instenc][3] << 21) | (Rs << 16) | \
	        (LoadStoreExcEnc[instenc][4] << 15) | (Rt2 << 10) | (Rn << 5) | Rt);
}

void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm)
{
	bool b64Bit = Is64Bit(Rt);
	bool b128Bit = IsQuad(Rt);
	bool bVec = IsVector(Rt);

	if (b128Bit)
		imm >>= 4;
	else if (b64Bit)
		imm >>= 3;
	else
		imm >>= 2;

	_assert_msg_(!(imm & ~0xF), "%s: offset too large %d", __FUNCTION__, imm);

	u32 opc = 0;
	if (b128Bit)
		opc = 2;
	else if (b64Bit && bVec)
		opc = 1;
	else if (b64Bit && !bVec)
		opc = 2;

	Rt = DecodeReg(Rt);
	Rt2 = DecodeReg(Rt2);
	Rn = DecodeReg(Rn);
	Write32((opc << 30) | (bVec << 26) | (op << 22) | (imm << 15) | (Rt2 << 10) | (Rn << 5) | Rt);
}

void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	bool b64Bit = Is64Bit(Rt);
	bool bVec = IsVector(Rt);

	u32 offset = imm & 0x1FF;

	_assert_msg_(!(imm < -256 || imm > 255), "%s: offset too large %d", __FUNCTION__, imm);

	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (offset << 12) | (op2 << 10) | (Rn << 5) | Rt);
}

void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm, u8 size)
{
	bool b64Bit = Is64Bit(Rt);
	bool bVec = IsVector(Rt);

	u8 shift = 0;
	if (size == 64)
		shift = 3;
	else if (size == 32)
		shift = 2;
	else if (size == 16)
		shift = 1;

	if (shift) {
		_assert_msg_(((imm >> shift) << shift) == imm, "%s(INDEX_UNSIGNED): offset must be aligned %d", __FUNCTION__, imm);
		imm >>= shift;
	}

	_assert_msg_(imm >= 0, "%s(INDEX_UNSIGNED): offset must be positive %d", __FUNCTION__, imm);
	_assert_msg_(!(imm & ~0xFFF), "%s(INDEX_UNSIGNED): offset too large %d", __FUNCTION__, imm);

	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (imm << 10) | (Rn << 5) | Rt);
}

void ARM64XEmitter::EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos)
{
	bool b64Bit = Is64Bit(Rd);

	_assert_msg_(!(imm & ~0xFFFF), "%s: immediate out of range: %d", __FUNCTION__, imm);

	Rd = DecodeReg(Rd);
	Write32((b64Bit << 31) | (op << 29) | (0x25 << 23) | (pos << 21) | (imm << 5) | Rd);
}

void ARM64XEmitter::EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
{
	bool b64Bit = Is64Bit(Rd);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (op << 29) | (0x26 << 23) | (b64Bit << 22) | \
	        (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	ARM64Reg decoded_Rm = DecodeReg(Rm.GetReg());

	Write32((size << 30) | (opc << 22) | (0x1C1 << 21) | (decoded_Rm << 16) | \
	        Rm.GetData() | (1 << 11) | (Rn << 5) | Rt);
}

void ARM64XEmitter::EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd)
{
	bool b64Bit = Is64Bit(Rd);

	_assert_msg_(!(imm & ~0xFFF), "%s: immediate too large: %x", __FUNCTION__, imm);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Write32((b64Bit << 31) | (op << 30) | (flags << 29) | (0x11 << 24) | (shift << 22) | \
	        (imm << 10) | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, int n)
{
	// Sometimes Rd is fixed to SP, but can still be 32bit or 64bit.
	// Use Rn to determine bitness here.
	bool b64Bit = Is64Bit(Rn);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((b64Bit << 31) | (op << 29) | (0x24 << 23) | (n << 22) | \
	        (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
}

void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	bool b64Bit = Is64Bit(Rt);
	u32 type_encode = 0;

	switch (type) {
	case INDEX_SIGNED:
		type_encode = 2;
		break;
	case INDEX_POST:
		type_encode = 1;
		break;
	case INDEX_PRE:
		type_encode = 3;
		break;
	case INDEX_UNSIGNED:
		_assert_msg_(false, "%s doesn't support INDEX_UNSIGNED!", __FUNCTION__);
		break;
	}

	if (b64Bit) {
		op |= 2;
		imm >>= 3;
	} else {
		imm >>= 2;
	}

	_assert_msg_(imm >= -64 && imm <= 63, "%s received too large imm: %d", __FUNCTION__, imm);

	Rt = DecodeReg(Rt);
	Rt2 = DecodeReg(Rt2);
	Rn = DecodeReg(Rn);

	Write32((op << 30) | (5 << 27) | (type_encode << 23) | (load << 22) | \
	        (((uint32_t)imm & 0x7F) << 15) | (Rt2 << 10) | (Rn << 5) | Rt);
}
void ARM64XEmitter::EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm)
{
	Rd = DecodeReg(Rd);

	Write32((op << 31) | ((imm & 0x3) << 29) | (0x10 << 24) | \
	        ((imm & 0x1FFFFC) << 3) | Rd);
}

void ARM64XEmitter::EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	_assert_msg_(!(imm < -256 || imm > 255), "%s received too large offset: %d", __FUNCTION__, imm);
	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);

	Write32((size << 30) | (7 << 27) | (op << 22) | ((imm & 0x1FF) << 12) | (Rn << 5) | Rt);
}

static inline bool IsInRangeImm19(s64 distance) {
	return (distance >= -0x40000 && distance <= 0x3FFFF);
}

static inline bool IsInRangeImm14(s64 distance) {
	return (distance >= -0x2000 && distance <= 0x1FFF);
}

static inline bool IsInRangeImm26(s64 distance) {
	return (distance >= -0x2000000 && distance <= 0x1FFFFFF);
}

static inline u32 MaskImm19(s64 distance) {
	return distance & 0x7FFFF;
}

static inline u32 MaskImm14(s64 distance) {
	return distance & 0x3FFF;
}

static inline u32 MaskImm26(s64 distance) {
	return distance & 0x3FFFFFF;
}
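
// Note (not in the original file): these distances are counted in 4-byte
// instructions, so imm19 reaches +/-1 MiB (conditional branches, CBZ/CBNZ),
// imm14 reaches +/-32 KiB (TBZ/TBNZ), and imm26 reaches +/-128 MiB (B/BL).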

// FixupBranch branching
void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch)
{
	bool Not = false;
	u32 inst = 0;
	s64 distance = (s64)(m_code - branch.ptr);
	distance >>= 2;

	switch (branch.type)
	{
		case 1: // CBNZ
			Not = true;
		case 0: // CBZ
		{
			_assert_msg_(IsInRangeImm19(distance), "%s(%d): Received too large distance: %llx", __FUNCTION__, branch.type, distance);
			bool b64Bit = Is64Bit(branch.reg);
			ARM64Reg reg = DecodeReg(branch.reg);
			inst = (b64Bit << 31) | (0x1A << 25) | (Not << 24) | (MaskImm19(distance) << 5) | reg;
		}
		break;
		case 2: // B (conditional)
			_assert_msg_(IsInRangeImm19(distance), "%s(%d): Received too large distance: %llx", __FUNCTION__, branch.type, distance);
			inst = (0x2A << 25) | (MaskImm19(distance) << 5) | branch.cond;
			break;
		case 4: // TBNZ
			Not = true;
		case 3: // TBZ
		{
			_assert_msg_(IsInRangeImm14(distance), "%s(%d): Received too large distance: %llx", __FUNCTION__, branch.type, distance);
			ARM64Reg reg = DecodeReg(branch.reg);
			inst = ((branch.bit & 0x20) << 26) | (0x1B << 25) | (Not << 24) | ((branch.bit & 0x1F) << 19) | (MaskImm14(distance) << 5) | reg;
		}
		break;
		case 5: // B (unconditional)
			_assert_msg_(IsInRangeImm26(distance), "%s(%d): Received too large distance: %llx", __FUNCTION__, branch.type, distance);
			inst = (0x5 << 26) | MaskImm26(distance);
			break;
		case 6: // BL (unconditional)
			_assert_msg_(IsInRangeImm26(distance), "%s(%d): Received too large distance: %llx", __FUNCTION__, branch.type, distance);
			inst = (0x25 << 26) | MaskImm26(distance);
			break;
	}

	ptrdiff_t writable = m_writable - m_code;
	*(u32 *)(branch.ptr + writable) = inst;
}
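
// Typical forward-branch pattern (a usage sketch, not from the original
// source): emit the branch before its target exists, then patch it once the
// target has been reached. The placeholder NOP written by the FixupBranch
// constructors below is what SetJumpTarget() overwrites.
//
//   FixupBranch skip = emitter.CBZ(W0);  // placeholder emitted here
//   ... code to skip when W0 == 0 ...
//   emitter.SetJumpTarget(skip);         // patch CBZ to branch to here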

FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt)
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 0;
	branch.reg = Rt;
	HINT(HINT_NOP);
	return branch;
}
FixupBranch ARM64XEmitter::CBNZ(ARM64Reg Rt)
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 1;
	branch.reg = Rt;
	HINT(HINT_NOP);
	return branch;
}
FixupBranch ARM64XEmitter::B(CCFlags cond)
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 2;
	branch.cond = cond;
	HINT(HINT_NOP);
	return branch;
}
FixupBranch ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bit)
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 3;
	branch.reg = Rt;
	branch.bit = bit;
	HINT(HINT_NOP);
	return branch;
}
FixupBranch ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bit)
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 4;
	branch.reg = Rt;
	branch.bit = bit;
	HINT(HINT_NOP);
	return branch;
}
FixupBranch ARM64XEmitter::B()
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 5;
	HINT(HINT_NOP);
	return branch;
}
FixupBranch ARM64XEmitter::BL()
{
	FixupBranch branch;
	branch.ptr = m_code;
	branch.type = 6;
	HINT(HINT_NOP);
	return branch;
}

// Compare and Branch
void ARM64XEmitter::CBZ(ARM64Reg Rt, const void* ptr)
{
	EncodeCompareBranchInst(0, Rt, ptr);
}
void ARM64XEmitter::CBNZ(ARM64Reg Rt, const void* ptr)
{
	EncodeCompareBranchInst(1, Rt, ptr);
}

// Conditional Branch
void ARM64XEmitter::B(CCFlags cond, const void* ptr)
{
	s64 distance = (s64)ptr - (s64)m_code;

	distance >>= 2;

	_assert_msg_(IsInRangeImm19(distance), "%s: Received too large distance: %p->%p %lld %llx", __FUNCTION__, m_code, ptr, distance, distance);
	Write32((0x54 << 24) | (MaskImm19(distance) << 5) | cond);
}

// Test and Branch
void ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bits, const void* ptr)
{
	EncodeTestBranchInst(0, Rt, bits, ptr);
}
void ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bits, const void* ptr)
{
	EncodeTestBranchInst(1, Rt, bits, ptr);
}

// Unconditional Branch
void ARM64XEmitter::B(const void* ptr)
{
	EncodeUnconditionalBranchInst(0, ptr);
}
void ARM64XEmitter::BL(const void* ptr)
{
	EncodeUnconditionalBranchInst(1, ptr);
}

void ARM64XEmitter::QuickCallFunction(ARM64Reg scratchreg, const void *func) {
	s64 distance = (s64)func - (s64)m_code;
	distance >>= 2; // Can only branch to opcode-aligned (4) addresses
	if (!IsInRangeImm26(distance)) {
		// WARN_LOG(Log::JIT, "Distance too far in function call (%p to %p)! Using scratch.", m_code, func);
		MOVI2R(scratchreg, (uintptr_t)func);
		BLR(scratchreg);
	} else {
		BL(func);
	}
}
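
// Note (not in the original file): the imm26 range check above corresponds to
// +/-128 MiB (a signed 26-bit instruction count times 4 bytes), the reach of a
// direct BL; anything farther falls back to MOVI2R plus BLR through the
// scratch register.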

// Unconditional Branch (register)
void ARM64XEmitter::BR(ARM64Reg Rn)
{
	EncodeUnconditionalBranchInst(0, 0x1F, 0, 0, Rn);
}
void ARM64XEmitter::BLR(ARM64Reg Rn)
{
	EncodeUnconditionalBranchInst(1, 0x1F, 0, 0, Rn);
}
void ARM64XEmitter::RET(ARM64Reg Rn)
{
	EncodeUnconditionalBranchInst(2, 0x1F, 0, 0, Rn);
}
void ARM64XEmitter::ERET()
{
	EncodeUnconditionalBranchInst(4, 0x1F, 0, 0, SP);
}
void ARM64XEmitter::DRPS()
{
	EncodeUnconditionalBranchInst(5, 0x1F, 0, 0, SP);
}

// Exception generation
void ARM64XEmitter::SVC(u32 imm)
{
	EncodeExceptionInst(0, imm);
}

void ARM64XEmitter::HVC(u32 imm)
{
	EncodeExceptionInst(1, imm);
}

void ARM64XEmitter::SMC(u32 imm)
{
	EncodeExceptionInst(2, imm);
}

void ARM64XEmitter::BRK(u32 imm)
{
	EncodeExceptionInst(3, imm);
}

void ARM64XEmitter::HLT(u32 imm)
{
	EncodeExceptionInst(4, imm);
}

void ARM64XEmitter::DCPS1(u32 imm)
{
	EncodeExceptionInst(5, imm);
}

void ARM64XEmitter::DCPS2(u32 imm)
{
	EncodeExceptionInst(6, imm);
}

void ARM64XEmitter::DCPS3(u32 imm)
{
	EncodeExceptionInst(7, imm);
}

// System
void ARM64XEmitter::_MSR(PStateField field, u8 imm)
{
	u32 op1 = 0, op2 = 0;
	switch (field)
	{
	case FIELD_SPSel: op1 = 0; op2 = 5; break;
	case FIELD_DAIFSet: op1 = 3; op2 = 6; break;
	case FIELD_DAIFClr: op1 = 3; op2 = 7; break;
	default:
		_assert_msg_(false, "Invalid PStateField to do an immediate move to");
		break;
	}
	EncodeSystemInst(0, op1, 4, imm, op2, WSP);
}

static void GetSystemReg(PStateField field, int &o0, int &op1, int &CRn, int &CRm, int &op2) {
	switch (field) {
	case FIELD_NZCV:
		o0 = 3; op1 = 3; CRn = 4; CRm = 2; op2 = 0;
		break;
	case FIELD_FPCR:
		o0 = 3; op1 = 3; CRn = 4; CRm = 4; op2 = 0;
		break;
	case FIELD_FPSR:
		o0 = 3; op1 = 3; CRn = 4; CRm = 4; op2 = 1;
		break;
	default:
		_assert_msg_(false, "Invalid PStateField to do a register move from/to");
		break;
	}
}

void ARM64XEmitter::_MSR(PStateField field, ARM64Reg Rt) {
	int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0;
	_assert_msg_(Is64Bit(Rt), "MSR: Rt must be 64-bit");
	GetSystemReg(field, o0, op1, CRn, CRm, op2);
	EncodeSystemInst(o0, op1, CRn, CRm, op2, DecodeReg(Rt));
}

void ARM64XEmitter::MRS(ARM64Reg Rt, PStateField field) {
	int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0;
	_assert_msg_(Is64Bit(Rt), "MRS: Rt must be 64-bit");
	GetSystemReg(field, o0, op1, CRn, CRm, op2);
	EncodeSystemInst(o0 | 4, op1, CRn, CRm, op2, DecodeReg(Rt));
}

void ARM64XEmitter::HINT(SystemHint op)
{
	EncodeSystemInst(0, 3, 2, 0, op, WSP);
}
void ARM64XEmitter::CLREX()
{
	EncodeSystemInst(0, 3, 3, 0, 2, WSP);
}
void ARM64XEmitter::DSB(BarrierType type)
{
	EncodeSystemInst(0, 3, 3, type, 4, WSP);
}
void ARM64XEmitter::DMB(BarrierType type)
{
	EncodeSystemInst(0, 3, 3, type, 5, WSP);
}
void ARM64XEmitter::ISB(BarrierType type)
{
	EncodeSystemInst(0, 3, 3, type, 6, WSP);
}
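
// Usage sketch (illustrative, not from the original source): reading and
// writing the FPCR through the accessors above, with X0 as scratch:
//
//   emitter.MRS(X0, FIELD_FPCR);   // X0 = FPCR
//   ... modify X0 ...
//   emitter._MSR(FIELD_FPCR, X0);  // FPCR = X0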

// Add/Subtract (extended register)
void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	ADD(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
}

void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	EncodeArithmeticInst(0, false, Rd, Rn, Rm, Option);
}

void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeArithmeticInst(0, true, Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
}

void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	EncodeArithmeticInst(0, true, Rd, Rn, Rm, Option);
}

void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	SUB(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
}

void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	EncodeArithmeticInst(1, false, Rd, Rn, Rm, Option);
}

void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeArithmeticInst(1, true, Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
}

void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	EncodeArithmeticInst(1, true, Rd, Rn, Rm, Option);
}

void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm)
{
	CMN(Rn, Rm, ArithOption(Rn, ST_LSL, 0));
}

void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	EncodeArithmeticInst(0, true, Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Option);
}

void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm)
{
	CMP(Rn, Rm, ArithOption(Rn, ST_LSL, 0));
}

void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Option)
{
	EncodeArithmeticInst(1, true, Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Option);
}

// Add/Subtract (with carry)
void ARM64XEmitter::ADC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeArithmeticCarryInst(0, false, Rd, Rn, Rm);
}
void ARM64XEmitter::ADCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeArithmeticCarryInst(0, true, Rd, Rn, Rm);
}
void ARM64XEmitter::SBC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeArithmeticCarryInst(1, false, Rd, Rn, Rm);
}
void ARM64XEmitter::SBCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeArithmeticCarryInst(1, true, Rd, Rn, Rm);
}

// Conditional Compare (immediate)
void ARM64XEmitter::CCMN(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond)
{
	EncodeCondCompareImmInst(0, Rn, imm, nzcv, cond);
}
void ARM64XEmitter::CCMP(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond)
{
	EncodeCondCompareImmInst(1, Rn, imm, nzcv, cond);
}

// Conditional Compare (register)
void ARM64XEmitter::CCMN(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond)
{
	EncodeCondCompareRegInst(0, Rn, Rm, nzcv, cond);
}
void ARM64XEmitter::CCMP(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond)
{
	EncodeCondCompareRegInst(1, Rn, Rm, nzcv, cond);
}

// Conditional Select
void ARM64XEmitter::CSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{
	EncodeCondSelectInst(0, Rd, Rn, Rm, cond);
}
void ARM64XEmitter::CSINC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{
	EncodeCondSelectInst(1, Rd, Rn, Rm, cond);
}
void ARM64XEmitter::CSINV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{
	EncodeCondSelectInst(2, Rd, Rn, Rm, cond);
}
void ARM64XEmitter::CSNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{
	EncodeCondSelectInst(3, Rd, Rn, Rm, cond);
}
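
// Aside (not in the original file): the usual CSET alias is built from CSINC
// with the inverted condition (assuming the CC_NEQ enumerator from the header;
// ARM condition codes other than AL/NV invert by flipping their low bit):
//
//   emitter.CSINC(W0, WZR, WZR, CC_NEQ);  // W0 = (EQ held) ? 1 : 0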

// Data-Processing 1 source
void ARM64XEmitter::RBIT(ARM64Reg Rd, ARM64Reg Rn)
{
	EncodeData1SrcInst(0, Rd, Rn);
}
void ARM64XEmitter::REV16(ARM64Reg Rd, ARM64Reg Rn)
{
	EncodeData1SrcInst(1, Rd, Rn);
}
void ARM64XEmitter::REV32(ARM64Reg Rd, ARM64Reg Rn)
{
	EncodeData1SrcInst(2, Rd, Rn);
}
void ARM64XEmitter::REV64(ARM64Reg Rd, ARM64Reg Rn)
{
	EncodeData1SrcInst(3, Rd, Rn);
}
void ARM64XEmitter::CLZ(ARM64Reg Rd, ARM64Reg Rn)
{
	EncodeData1SrcInst(4, Rd, Rn);
}
void ARM64XEmitter::CLS(ARM64Reg Rd, ARM64Reg Rn)
{
	EncodeData1SrcInst(5, Rd, Rn);
}

// Data-Processing 2 source
void ARM64XEmitter::UDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(0, Rd, Rn, Rm);
}
void ARM64XEmitter::SDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(1, Rd, Rn, Rm);
}
void ARM64XEmitter::LSLV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(2, Rd, Rn, Rm);
}
void ARM64XEmitter::LSRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(3, Rd, Rn, Rm);
}
void ARM64XEmitter::ASRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(4, Rd, Rn, Rm);
}
void ARM64XEmitter::RORV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(5, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32B(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(6, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32H(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(7, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32W(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(8, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32CB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(9, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32CH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(10, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32CW(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(11, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32X(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(12, Rd, Rn, Rm);
}
void ARM64XEmitter::CRC32CX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData2SrcInst(13, Rd, Rn, Rm);
}

// Data-Processing 3 source
void ARM64XEmitter::MADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	EncodeData3SrcInst(0, Rd, Rn, Rm, Ra);
}
void ARM64XEmitter::MSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	EncodeData3SrcInst(1, Rd, Rn, Rm, Ra);
}
void ARM64XEmitter::SMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	EncodeData3SrcInst(2, Rd, Rn, Rm, Ra);
}
void ARM64XEmitter::SMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	SMADDL(Rd, Rn, Rm, SP);
}
void ARM64XEmitter::SMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	EncodeData3SrcInst(3, Rd, Rn, Rm, Ra);
}
void ARM64XEmitter::SMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData3SrcInst(4, Rd, Rn, Rm, SP);
}
void ARM64XEmitter::UMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	EncodeData3SrcInst(5, Rd, Rn, Rm, Ra);
}
void ARM64XEmitter::UMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	UMADDL(Rd, Rn, Rm, SP);
}
void ARM64XEmitter::UMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{
	EncodeData3SrcInst(6, Rd, Rn, Rm, Ra);
}
void ARM64XEmitter::UMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData3SrcInst(7, Rd, Rn, Rm, SP);
}
void ARM64XEmitter::MUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData3SrcInst(0, Rd, Rn, Rm, SP);
}
void ARM64XEmitter::MNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EncodeData3SrcInst(1, Rd, Rn, Rm, SP);
}
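
// Note (not in the original file): passing SP as Ra above is deliberate --
// register number 31 encodes the zero register in these data-processing
// encodings, so MUL is MADD with Ra = ZR and SMULL/UMULL are SMADDL/UMADDL
// with Ra = ZR.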

// Logical (shifted register)
void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(0, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(1, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(2, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(3, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(4, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(5, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(6, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	EncodeLogicalInst(7, Rd, Rn, Rm, Shift);
}
void ARM64XEmitter::TST(ARM64Reg Rn, ARM64Reg Rm, const ArithOption &Shift)
{
	ANDS(Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Shift);
}

void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm, const ArithOption &Shift) {
	ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, Shift);
}

void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm)
{
	if (IsGPR(Rd) && IsGPR(Rm)) {
		ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0));
	} else {
		_assert_msg_(false, "Non-GPRs not supported in MOV");
	}
}

void ARM64XEmitter::MOVfromSP(ARM64Reg Rd) {
	ADD(Rd, ARM64Reg::SP, 0, false);
}

void ARM64XEmitter::MOVtoSP(ARM64Reg Rn) {
	ADD(ARM64Reg::SP, Rn, 0, false);
}

void ARM64XEmitter::MVN(ARM64Reg Rd, ARM64Reg Rm)
{
	ORN(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0));
}
void ARM64XEmitter::LSL(ARM64Reg Rd, ARM64Reg Rm, int shift)
{
	ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, shift));
}
void ARM64XEmitter::LSR(ARM64Reg Rd, ARM64Reg Rm, int shift)
{
	ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSR, shift));
}
void ARM64XEmitter::ASR(ARM64Reg Rd, ARM64Reg Rm, int shift)
{
	ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_ASR, shift));
}
void ARM64XEmitter::ROR(ARM64Reg Rd, ARM64Reg Rm, int shift)
{
	ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_ROR, shift));
}

// Logical (immediate)
void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
{
	EncodeLogicalImmInst(0, Rd, Rn, immr, imms, invert);
}
void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
{
	EncodeLogicalImmInst(3, Rd, Rn, immr, imms, invert);
}
void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
{
	EncodeLogicalImmInst(2, Rd, Rn, immr, imms, invert);
}
void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
{
	EncodeLogicalImmInst(1, Rd, Rn, immr, imms, invert);
}
void ARM64XEmitter::TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert)
{
	EncodeLogicalImmInst(3, Is64Bit(Rn) ? ZR : WZR, Rn, immr, imms, invert);
}

// Add/subtract (immediate)
void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift)
{
	EncodeAddSubImmInst(0, false, shift, imm, Rn, Rd);
}
void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift)
{
	EncodeAddSubImmInst(0, true, shift, imm, Rn, Rd);
}
void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift)
{
	EncodeAddSubImmInst(1, false, shift, imm, Rn, Rd);
}
void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift)
{
	EncodeAddSubImmInst(1, true, shift, imm, Rn, Rd);
}
void ARM64XEmitter::CMP(ARM64Reg Rn, u32 imm, bool shift)
{
	EncodeAddSubImmInst(1, true, shift, imm, Rn, Is64Bit(Rn) ? SP : WSP);
}
void ARM64XEmitter::CMN(ARM64Reg Rn, u32 imm, bool shift)
{
	EncodeAddSubImmInst(0, true, shift, imm, Rn, Is64Bit(Rn) ? SP : WSP);
}
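
// Tying this back to IsImmLogical() above (a usage sketch, not from the
// original source): a constant that passes the test can be applied in one
// instruction instead of being loaded into a register first:
//
//   unsigned n, imm_s, imm_r;
//   if (IsImmLogical(0x00FF00FF00FF00FFULL, 64, &n, &imm_s, &imm_r))
//     emitter.AND(X0, X1, imm_r, imm_s, n != 0);  // no MOVI2R needed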

// Data Processing (Immediate)
void ARM64XEmitter::MOVZ(ARM64Reg Rd, u32 imm, ShiftAmount pos)
{
	EncodeMOVWideInst(2, Rd, imm, pos);
}
void ARM64XEmitter::MOVN(ARM64Reg Rd, u32 imm, ShiftAmount pos)
{
	EncodeMOVWideInst(0, Rd, imm, pos);
}
void ARM64XEmitter::MOVK(ARM64Reg Rd, u32 imm, ShiftAmount pos)
{
	EncodeMOVWideInst(3, Rd, imm, pos);
}

// Bitfield move
void ARM64XEmitter::BFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
{
	EncodeBitfieldMOVInst(1, Rd, Rn, immr, imms);
}
void ARM64XEmitter::SBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
{
	EncodeBitfieldMOVInst(0, Rd, Rn, immr, imms);
}
void ARM64XEmitter::UBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
{
	EncodeBitfieldMOVInst(2, Rd, Rn, immr, imms);
}

void ARM64XEmitter::BFI(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width)
{
	u32 size = Is64Bit(Rn) ? 64 : 32;
	_assert_msg_((lsb + width) <= size, "%s passed lsb %d and width %d which is greater than the register size!",
		__FUNCTION__, lsb, width);
	EncodeBitfieldMOVInst(1, Rd, Rn, (size - lsb) % size, width - 1);
}
void ARM64XEmitter::UBFIZ(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width)
{
	u32 size = Is64Bit(Rn) ? 64 : 32;
	_assert_msg_((lsb + width) <= size, "%s passed lsb %d and width %d which is greater than the register size!",
		__FUNCTION__, lsb, width);
	EncodeBitfieldMOVInst(2, Rd, Rn, (size - lsb) % size, width - 1);
}
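
// Worked example (illustrative, not from the original source): on a 32-bit
// register, BFI(W0, W1, 8, 4) inserts the low 4 bits of W1 into W0 at bit 8:
// size = 32, so it becomes EncodeBitfieldMOVInst(1, W0, W1, (32 - 8) % 32,
// 4 - 1), i.e. BFM with immr = 24 and imms = 3.
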
void ARM64XEmitter::EXTR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u32 shift) {
	bool sf = Is64Bit(Rd);
	bool N = sf;
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);
	Write32((sf << 31) | (0x27 << 23) | (N << 22) | (Rm << 16) | (shift << 10) | (Rn << 5) | Rd);
}
void ARM64XEmitter::SXTB(ARM64Reg Rd, ARM64Reg Rn)
{
	SBFM(Rd, Rn, 0, 7);
}
void ARM64XEmitter::SXTH(ARM64Reg Rd, ARM64Reg Rn)
{
	SBFM(Rd, Rn, 0, 15);
}
void ARM64XEmitter::SXTW(ARM64Reg Rd, ARM64Reg Rn)
{
	_assert_msg_(Is64Bit(Rd), "%s requires 64bit register as destination", __FUNCTION__);
	SBFM(Rd, Rn, 0, 31);
}
void ARM64XEmitter::UXTB(ARM64Reg Rd, ARM64Reg Rn)
{
	UBFM(Rd, Rn, 0, 7);
}
void ARM64XEmitter::UXTH(ARM64Reg Rd, ARM64Reg Rn)
{
	UBFM(Rd, Rn, 0, 15);
}

// Load Register (Literal)
void ARM64XEmitter::LDR(ARM64Reg Rt, u32 imm)
{
	EncodeLoadRegisterInst(0, Rt, imm);
}
void ARM64XEmitter::LDRSW(ARM64Reg Rt, u32 imm)
{
	EncodeLoadRegisterInst(2, Rt, imm);
}
void ARM64XEmitter::PRFM(ARM64Reg Rt, u32 imm)
{
	EncodeLoadRegisterInst(3, Rt, imm);
}

// Load/Store pair
void ARM64XEmitter::LDP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStorePair(0, 1, type, Rt, Rt2, Rn, imm);
}
void ARM64XEmitter::LDPSW(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStorePair(1, 1, type, Rt, Rt2, Rn, imm);
}
void ARM64XEmitter::STP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStorePair(0, 0, type, Rt, Rt2, Rn, imm);
}

// Load/Store Exclusive
void ARM64XEmitter::STXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(0, Rs, SP, Rt, Rn);
}
void ARM64XEmitter::STLXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(1, Rs, SP, Rt, Rn);
}
void ARM64XEmitter::LDXRB(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(2, SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDAXRB(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(3, SP, SP, Rt, Rn);
}
void ARM64XEmitter::STLRB(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(4, SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDARB(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(5, SP, SP, Rt, Rn);
}
void ARM64XEmitter::STXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(6, Rs, SP, Rt, Rn);
}
void ARM64XEmitter::STLXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(7, Rs, SP, Rt, Rn);
}
void ARM64XEmitter::LDXRH(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(8, SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDAXRH(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(9, SP, SP, Rt, Rn);
}
void ARM64XEmitter::STLRH(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(10, SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDARH(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(11, SP, SP, Rt, Rn);
}
void ARM64XEmitter::STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(12 + Is64Bit(Rt), Rs, SP, Rt, Rn);
}
void ARM64XEmitter::STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(14 + Is64Bit(Rt), Rs, SP, Rt, Rn);
}
void ARM64XEmitter::STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(16 + Is64Bit(Rt), Rs, Rt2, Rt, Rn);
}
void ARM64XEmitter::STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(18 + Is64Bit(Rt), Rs, Rt2, Rt, Rn);
}
void ARM64XEmitter::LDXR(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(20 + Is64Bit(Rt), SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDAXR(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(22 + Is64Bit(Rt), SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(24 + Is64Bit(Rt), SP, Rt2, Rt, Rn);
}
void ARM64XEmitter::LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(26 + Is64Bit(Rt), SP, Rt2, Rt, Rn);
}
void ARM64XEmitter::STLR(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(28 + Is64Bit(Rt), SP, SP, Rt, Rn);
}
void ARM64XEmitter::LDAR(ARM64Reg Rt, ARM64Reg Rn)
{
	EncodeLoadStoreExcInst(30 + Is64Bit(Rt), SP, SP, Rt, Rn);
}

// Load/Store no-allocate pair (offset)
void ARM64XEmitter::STNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm)
{
	EncodeLoadStorePairedInst(0xA0, Rt, Rt2, Rn, imm);
}
void ARM64XEmitter::LDNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm)
{
	EncodeLoadStorePairedInst(0xA1, Rt, Rt2, Rn, imm);
}
1761
1762
// Load/Store register (immediate post-indexed)
1763
// XXX: Most of these support vectors
1764
void ARM64XEmitter::STRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1765
{
1766
if (type == INDEX_UNSIGNED)
1767
EncodeLoadStoreIndexedInst(0x0E4, Rt, Rn, imm, 8);
1768
else
1769
EncodeLoadStoreIndexedInst(0x0E0,
1770
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1771
}
1772
void ARM64XEmitter::LDRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1773
{
1774
if (type == INDEX_UNSIGNED)
1775
EncodeLoadStoreIndexedInst(0x0E5, Rt, Rn, imm, 8);
1776
else
1777
EncodeLoadStoreIndexedInst(0x0E1,
1778
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1779
}
1780
void ARM64XEmitter::LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1781
{
1782
if (type == INDEX_UNSIGNED)
1783
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E6 : 0x0E7, Rt, Rn, imm, 8);
1784
else
1785
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E2 : 0x0E3,
1786
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1787
}
1788
void ARM64XEmitter::STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1789
{
1790
if (type == INDEX_UNSIGNED)
1791
EncodeLoadStoreIndexedInst(0x1E4, Rt, Rn, imm, 16);
1792
else
1793
EncodeLoadStoreIndexedInst(0x1E0,
1794
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1795
}
1796
void ARM64XEmitter::LDRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1797
{
1798
if (type == INDEX_UNSIGNED)
1799
EncodeLoadStoreIndexedInst(0x1E5, Rt, Rn, imm, 16);
1800
else
1801
EncodeLoadStoreIndexedInst(0x1E1,
1802
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1803
}
1804
void ARM64XEmitter::LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1805
{
1806
if (type == INDEX_UNSIGNED)
1807
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E6 : 0x1E7, Rt, Rn, imm, 16);
1808
else
1809
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E2 : 0x1E3,
1810
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1811
}
1812
void ARM64XEmitter::STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1813
{
1814
if (type == INDEX_UNSIGNED)
1815
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E4 : 0x2E4, Rt, Rn, imm, Is64Bit(Rt) ? 64 : 32);
1816
else
1817
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E0 : 0x2E0,
1818
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1819
}
1820
void ARM64XEmitter::LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1821
{
1822
if (type == INDEX_UNSIGNED)
1823
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E5 : 0x2E5, Rt, Rn, imm, Is64Bit(Rt) ? 64 : 32);
1824
else
1825
EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E1 : 0x2E1,
1826
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1827
}
1828
void ARM64XEmitter::LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
1829
{
1830
if (type == INDEX_UNSIGNED)
1831
EncodeLoadStoreIndexedInst(0x2E6, Rt, Rn, imm, 32);
1832
else
1833
EncodeLoadStoreIndexedInst(0x2E2,
1834
type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
1835
}
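
// Illustrative sketch, not from the original file: the three immediate
// addressing forms accepted by the loads/stores above.
static void Example_LoadForms(ARM64XEmitter &emit)
{
	emit.LDR(INDEX_UNSIGNED, W0, X1, 8);  // [X1 + 8], scaled unsigned offset
	emit.LDR(INDEX_PRE, W0, X1, 8);       // X1 += 8, then load [X1]
	emit.LDR(INDEX_POST, W0, X1, 8);      // load [X1], then X1 += 8
}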

// Load/Store register (register offset)
void ARM64XEmitter::STRB(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(0, 0, Rt, Rn, Rm);
}
void ARM64XEmitter::LDRB(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(0, 1, Rt, Rn, Rm);
}
void ARM64XEmitter::LDRSB(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	bool b64Bit = Is64Bit(Rt);
	EncodeLoadStoreRegisterOffset(0, 3 - b64Bit, Rt, Rn, Rm);
}
void ARM64XEmitter::STRH(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(1, 0, Rt, Rn, Rm);
}
void ARM64XEmitter::LDRH(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(1, 1, Rt, Rn, Rm);
}
void ARM64XEmitter::LDRSH(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	bool b64Bit = Is64Bit(Rt);
	EncodeLoadStoreRegisterOffset(1, 3 - b64Bit, Rt, Rn, Rm);
}
void ARM64XEmitter::STR(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	bool b64Bit = Is64Bit(Rt);
	EncodeLoadStoreRegisterOffset(2 + b64Bit, 0, Rt, Rn, Rm);
}
void ARM64XEmitter::LDR(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	bool b64Bit = Is64Bit(Rt);
	EncodeLoadStoreRegisterOffset(2 + b64Bit, 1, Rt, Rn, Rm);
}
void ARM64XEmitter::LDRSW(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(2, 2, Rt, Rn, Rm);
}
void ARM64XEmitter::PRFM(ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(3, 2, Rt, Rn, Rm);
}
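
// Illustrative sketch, not from the original file: register-offset addressing.
// The ArithOption extended-register constructor (register plus an optional
// scaled-index flag) is an assumption based on its use elsewhere in this
// emitter.
static void Example_LoadRegisterOffset(ARM64XEmitter &emit)
{
	emit.LDR(W0, X1, ArithOption(X2));        // [X1 + X2]
	emit.LDR(W0, X1, ArithOption(X2, true));  // [X1 + (X2 << 2)] for a 32-bit load
}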

// Load/Store register (unscaled offset)
void ARM64XEmitter::STURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(0, 0, Rt, Rn, imm);
}
void ARM64XEmitter::LDURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(0, 1, Rt, Rn, imm);
}
void ARM64XEmitter::LDURSB(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(0, Is64Bit(Rt) ? 2 : 3, Rt, Rn, imm);
}
void ARM64XEmitter::STURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(1, 0, Rt, Rn, imm);
}
void ARM64XEmitter::LDURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(1, 1, Rt, Rn, imm);
}
void ARM64XEmitter::LDURSH(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(1, Is64Bit(Rt) ? 2 : 3, Rt, Rn, imm);
}
void ARM64XEmitter::STUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(Is64Bit(Rt) ? 3 : 2, 0, Rt, Rn, imm);
}
void ARM64XEmitter::LDUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStoreUnscaled(Is64Bit(Rt) ? 3 : 2, 1, Rt, Rn, imm);
}
void ARM64XEmitter::LDURSW(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	// LDURSW always sign-extends into a 64-bit result; by this emitter's
	// convention the caller passes the W-form register name, which the
	// assert enforces (the old message said the opposite of the check).
	_assert_msg_(!Is64Bit(Rt), "%s must not be passed a 64bit register!", __FUNCTION__);
	EncodeLoadStoreUnscaled(2, 2, Rt, Rn, imm);
}

// Address of label/page PC-relative
void ARM64XEmitter::ADR(ARM64Reg Rd, s32 imm)
{
	EncodeAddressInst(0, Rd, imm);
}
void ARM64XEmitter::ADRP(ARM64Reg Rd, s32 imm)
{
	EncodeAddressInst(1, Rd, imm >> 12);
}
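
// Illustrative sketch, not from the original file: the page arithmetic ADRP
// implies. ADRP adds a page delta to PC with its low 12 bits cleared, so a
// full address is usually ADRP plus an ADD of the low 12 bits (exactly what
// MOVI2R below does for nearby addresses).
static void Example_PageAddress(ARM64XEmitter &emit, const void *target)
{
	s64 page_delta = (s64)((u64)target & ~(u64)0xFFF) - (s64)((u64)emit.GetCodePointer() & ~(u64)0xFFF);
	emit.ADRP(X0, (s32)page_delta);            // X0 = page of target
	emit.ADD(X0, X0, (u64)target & 0xFFF);     // add the low 12 bits
}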

// LLVM is unhappy about the regular abs function, so here we go.
inline int64_t abs64(int64_t x) {
	return x >= 0 ? x : -x;
}

static int Count(const bool part[4]) {
	int cnt = 0;
	for (int i = 0; i < 4; i++) {
		if (part[i])
			cnt++;
	}
	return cnt;
}

// Wrapper around MOVZ+MOVK (and later MOVN)
void ARM64XEmitter::MOVI2R(ARM64Reg Rd, u64 imm, bool optimize)
{
	unsigned int parts = Is64Bit(Rd) ? 4 : 2;
	// Must be zero-initialized: Count() below reads all four entries, and the
	// loop that fills them in only runs when optimize is set.
	bool upload_part[4] = { false, false, false, false };

	// Always start with a movz! Kills the dependency on the register.
	bool use_movz = true;

	if (!imm) {
		// Zero immediate, just clear the register. EOR is pointless when we have MOVZ, which looks clearer in disasm too.
		MOVZ(Rd, 0, SHIFT_0);
		return;
	}

	if ((Is64Bit(Rd) && imm == std::numeric_limits<u64>::max()) ||
	    (!Is64Bit(Rd) && imm == std::numeric_limits<u32>::max()))
	{
		// Max unsigned value (or if signed, -1)
		// Set to ~ZR
		ARM64Reg ZR = Is64Bit(Rd) ? SP : WSP;
		ORN(Rd, ZR, ZR, ArithOption(ZR, ST_LSL, 0));
		return;
	}

	// TODO: Make some more systemic use of MOVN, but this will take care of most cases.
	// Small negative integer. Use MOVN
	if (!Is64Bit(Rd) && (imm | 0xFFFF0000) == imm) {
		MOVN(Rd, (u32)(~imm), SHIFT_0);
		return;
	}

	// XXX: Use MOVN when possible.
	// XXX: Optimize more
	// XXX: Support rotating immediates to save instructions
	if (optimize)
	{
		for (unsigned int i = 0; i < parts; ++i)
		{
			// Mark the 16-bit chunks that actually need uploading.
			upload_part[i] = ((imm >> (i * 16)) & 0xFFFF) != 0;
		}
	}

	u64 aligned_pc = (u64)GetCodePointer() & ~0xFFF;
	s64 aligned_offset = (s64)imm - (s64)aligned_pc;
	// When optimize is false, Count() is 0 here, so we always fall through to
	// the fixed-size MOVZ/MOVK sequence (important for code patched later).
	if (Count(upload_part) > 1 && abs64(aligned_offset) < 0x7FFFFFFFLL)
	{
		// Immediate we are loading is within 4GB of our aligned range
		// Most likely an address that we can load in one or two instructions
		if (!(abs64(aligned_offset) & 0xFFF))
		{
			// Aligned ADR
			ADRP(Rd, (s32)aligned_offset);
			return;
		}
		else
		{
			// If the address is within 1MB of PC we can load it in a single instruction still
			s64 offset = (s64)imm - (s64)GetCodePointer();
			if (offset >= -0xFFFFF && offset <= 0xFFFFF)
			{
				ADR(Rd, (s32)offset);
				return;
			}
			else
			{
				ADRP(Rd, (s32)(aligned_offset & ~0xFFF));
				ADD(Rd, Rd, imm & 0xFFF);
				return;
			}
		}
	}

	for (unsigned i = 0; i < parts; ++i)
	{
		if (use_movz && upload_part[i])
		{
			MOVZ(Rd, (imm >> (i * 16)) & 0xFFFF, (ShiftAmount)i);
			use_movz = false;
		}
		else
		{
			if (upload_part[i] || !optimize)
				MOVK(Rd, (imm >> (i * 16)) & 0xFFFF, (ShiftAmount)i);
		}
	}
}
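
// Illustrative sketch, not from the original file: typical MOVI2R outcomes
// (assuming the immediate is nowhere near the code pointer, and that the
// header defaults optimize to true).
static void Example_Materialize(ARM64XEmitter &emit)
{
	emit.MOVI2R(W0, 0);            // MOVZ W0, #0
	emit.MOVI2R(W0, 0xFFFFFFFF);   // ORN W0, WZR, WZR (all ones)
	emit.MOVI2R(W0, 0xFFFF1234);   // MOVN W0, #0xEDCB (small negative)
	emit.MOVI2R(X0, 0x12340000ULL, true);  // single MOVZ with a 16-bit shift
}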

void ARM64XEmitter::PUSH(ARM64Reg Rd) {
	STR(INDEX_PRE, Rd, SP, -16);
}

void ARM64XEmitter::POP(ARM64Reg Rd) {
	LDR(INDEX_POST, Rd, SP, 16);
}

void ARM64XEmitter::PUSH2(ARM64Reg Rd, ARM64Reg Rn) {
	STP(INDEX_PRE, Rd, Rn, SP, -16);
}

void ARM64XEmitter::POP2(ARM64Reg Rd, ARM64Reg Rn) {
	LDP(INDEX_POST, Rd, Rn, SP, 16);
}
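
// Illustrative sketch, not from the original file: a minimal prologue/epilogue
// pairing built on the helpers above (SP stays 16-byte aligned by construction).
static void Example_PrologueEpilogue(ARM64XEmitter &emit)
{
	emit.PUSH2(X29, X30);  // save frame pointer and link register
	// ... body ...
	emit.POP2(X29, X30);
	emit.RET();
}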

// Float Emitter
void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	u32 encoded_size = 0;
	u32 encoded_imm = 0;

	if (size == 8)
		encoded_size = 0;
	else if (size == 16)
		encoded_size = 1;
	else if (size == 32)
		encoded_size = 2;
	else if (size == 64)
		encoded_size = 3;
	else if (size == 128)
		encoded_size = 0;

	if (type == INDEX_UNSIGNED)
	{
		_assert_msg_(!(imm & ((size - 1) >> 3)), "%s(INDEX_UNSIGNED) immediate offset must be aligned to size! (%d) (%p)", __FUNCTION__, imm, m_emit->GetCodePointer());
		_assert_msg_(imm >= 0, "%s(INDEX_UNSIGNED) immediate offset must be positive!", __FUNCTION__);
		if (size == 16)
			imm >>= 1;
		else if (size == 32)
			imm >>= 2;
		else if (size == 64)
			imm >>= 3;
		else if (size == 128)
			imm >>= 4;
		encoded_imm = (imm & 0xFFF);
	}
	else
	{
		_assert_msg_(!(imm < -256 || imm > 255), "%s immediate offset must be within range of -256 to 255!", __FUNCTION__);
		encoded_imm = (imm & 0x1FF) << 2;
		if (type == INDEX_POST)
			encoded_imm |= 1;
		else
			encoded_imm |= 3;
	}

	Write32((encoded_size << 30) | (0xF << 26) | (type == INDEX_UNSIGNED ? (1 << 24) : 0) |
	        (size == 128 ? (1 << 23) : 0) | (opc << 22) | (encoded_imm << 10) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::EmitScalar2Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!IsQuad(Rd), "%s only supports double and single registers!", __FUNCTION__);
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (Rm << 16) |
	        (opcode << 12) | (1 << 11) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__);
	bool quad = IsQuad(Rd);
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((quad << 30) | (U << 29) | (0x71 << 21) | (size << 22) |
	        (Rm << 16) | (opcode << 11) | (1 << 10) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitCopy(bool Q, u32 op, u32 imm5, u32 imm4, ARM64Reg Rd, ARM64Reg Rn)
{
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((Q << 30) | (op << 29) | (0x7 << 25) | (imm5 << 16) | (imm4 << 11) |
	        (1 << 10) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitScalarPairwise(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) {
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((1 << 30) | (U << 29) | (0b111100011 << 20) | (size << 22) | (opcode << 12) | (1 << 11) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn)
{
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__);
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((Q << 30) | (U << 29) | (0x71 << 21) | (size << 22) |
	        (opcode << 12) | (1 << 11) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, ARM64Reg Rn)
{
	_assert_msg_(!IsSingle(Rt), "%s doesn't support singles!", __FUNCTION__);
	bool quad = IsQuad(Rt);
	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);

	Write32((quad << 30) | (0xD << 24) | (L << 22) | (R << 21) | (opcode << 13) |
	        (S << 12) | (size << 10) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!IsSingle(Rt), "%s doesn't support singles!", __FUNCTION__);
	bool quad = IsQuad(Rt);
	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((quad << 30) | (0x1B << 23) | (L << 22) | (R << 21) | (Rm << 16) |
	        (opcode << 13) | (S << 12) | (size << 10) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn)
{
	_assert_msg_(!IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__);
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (opcode << 15) |
	        (1 << 14) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 opcode, ARM64Reg Rd, ARM64Reg Rn)
{
	_assert_msg_(Rn <= SP, "%s only supports GPR as source!", __FUNCTION__);
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((sf << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (rmode << 19) |
	        (opcode << 16) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round, bool sign)
{
	// Note: 'sign' feeds the U bit / opcode bit, so true selects the *unsigned*
	// variant: FCVTS passes false, FCVTU passes true.
	_dbg_assert_msg_(IsScalar(Rn), "fcvts: Rn must be floating point");
	if (IsGPR(Rd)) {
		// Use the encoding that transfers the result to a GPR.
		bool sf = Is64Bit(Rd);
		int type = IsDouble(Rn) ? 1 : 0;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);
		int opcode = (sign ? 1 : 0);
		int rmode = 0;
		switch (round) {
		case ROUND_A: rmode = 0; opcode |= 4; break;
		case ROUND_P: rmode = 1; break;
		case ROUND_M: rmode = 2; break;
		case ROUND_Z: rmode = 3; break;
		case ROUND_N: rmode = 0; break;
		}
		EmitConversion2(sf, 0, true, type, rmode, opcode, 0, Rd, Rn);
	}
	else
	{
		// Use the encoding (vector, single) that keeps the result in the fp register.
		int sz = IsDouble(Rn);
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);
		int opcode = 0;
		switch (round) {
		case ROUND_A: opcode = 0x1C; break;
		case ROUND_N: opcode = 0x1A; break;
		case ROUND_M: opcode = 0x1B; break;
		case ROUND_P: opcode = 0x1A; sz |= 2; break;
		case ROUND_Z: opcode = 0x1B; sz |= 2; break;
		}
		Write32((0x5E << 24) | (sign << 29) | (sz << 22) | (1 << 21) | (opcode << 12) | (2 << 10) | (Rn << 5) | Rd);
	}
}

void ARM64FloatEmitter::FCVTS(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round) {
	EmitConvertScalarToInt(Rd, Rn, round, false);
}

void ARM64FloatEmitter::FCVTU(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round) {
	EmitConvertScalarToInt(Rd, Rn, round, true);
}
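
// Illustrative sketch, not from the original file: the rounding-mode wrappers
// in use. ROUND_Z truncates toward zero (the C cast behavior); ROUND_N rounds
// to nearest-even.
static void Example_FloatToInt(ARM64FloatEmitter &fp)
{
	fp.FCVTS(W0, S0, ROUND_Z);  // W0 = (s32)truncate(S0)
	fp.FCVTS(W0, S0, ROUND_N);  // W0 = nearest-even(S0)
	fp.FCVTU(W0, S0, ROUND_Z);  // unsigned variant
}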

void ARM64FloatEmitter::FCVTZS(ARM64Reg Rd, ARM64Reg Rn, int scale) {
	if (IsScalar(Rd)) {
		int imm = (IsDouble(Rn) ? 64 : 32) * 2 - scale;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);

		Write32((1 << 30) | (0 << 29) | (0x1F << 24) | (imm << 16) | (0x1F << 11) | (1 << 10) | (Rn << 5) | Rd);
	} else {
		// GPR destination: sf comes from the GPR, the type from the FP source,
		// and the instruction's scale field encodes 64 - fbits.
		bool sf = Is64Bit(Rd);
		u32 type = 0;
		if (IsDouble(Rn))
			type = 1;
		int rmode = 3;
		int opcode = 0;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);

		Write32((sf << 31) | (0 << 29) | (0x1E << 24) | (type << 22) | (rmode << 19) | (opcode << 16) | ((64 - scale) << 10) | (Rn << 5) | Rd);
	}
}

void ARM64FloatEmitter::FCVTZU(ARM64Reg Rd, ARM64Reg Rn, int scale) {
	if (IsScalar(Rd)) {
		int imm = (IsDouble(Rn) ? 64 : 32) * 2 - scale;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);

		Write32((1 << 30) | (1 << 29) | (0x1F << 24) | (imm << 16) | (0x1F << 11) | (1 << 10) | (Rn << 5) | Rd);
	} else {
		// As above; the unsigned variant is selected via opcode.
		bool sf = Is64Bit(Rd);
		u32 type = 0;
		if (IsDouble(Rn))
			type = 1;
		int rmode = 3;
		int opcode = 1;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);

		Write32((sf << 31) | (0 << 29) | (0x1E << 24) | (type << 22) | (rmode << 19) | (opcode << 16) | ((64 - scale) << 10) | (Rn << 5) | Rd);
	}
}
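
// Illustrative sketch, not from the original file: with #fbits == scale the
// fixed-point converts compute round_toward_zero(value * 2^scale), i.e. a
// Q(n-scale).scale fixed-point number.
static void Example_FixedPoint(ARM64FloatEmitter &fp)
{
	fp.FCVTZS(S0, S1, 16);  // S0 = (s32)(S1 * 65536), toward zero (Q16.16)
	fp.SCVTF(S0, S0, 16);   // and back: S0 = (float)bits / 65536
}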

void ARM64FloatEmitter::EmitConversion2(bool sf, bool S, bool direction, u32 type, u32 rmode, u32 opcode, int scale, ARM64Reg Rd, ARM64Reg Rn)
{
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((sf << 31) | (S << 29) | (0xF0 << 21) | (direction << 21) | (type << 22) | (rmode << 19) |
	        (opcode << 16) | (scale << 10) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!IsQuad(Rn), "%s doesn't support vector!", __FUNCTION__);
	bool is_double = IsDouble(Rn);

	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) |
	        (op << 14) | (1 << 13) | (Rn << 5) | opcode2);
}

void ARM64FloatEmitter::EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__);
	bool is_double = IsDouble(Rd);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) |
	        (cond << 12) | (3 << 10) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitCondCompare(bool M, bool S, CCFlags cond, int op, u8 nzcv, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(!IsQuad(Rn), "%s doesn't support vector!", __FUNCTION__);
	bool is_double = IsDouble(Rn);

	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) |
	        (cond << 12) | (1 << 10) | (Rn << 5) | (op << 4) | nzcv);
}

void ARM64FloatEmitter::EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__);

	bool quad = IsQuad(Rd);

	u32 encoded_size = 0;
	if (size == 16)
		encoded_size = 1;
	else if (size == 32)
		encoded_size = 2;
	else if (size == 64)
		encoded_size = 3;

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((quad << 30) | (7 << 25) | (encoded_size << 22) | (Rm << 16) | (op << 12) |
	        (1 << 11) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64Reg Rd, u32 imm8)
{
	_assert_msg_(!IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__);

	bool is_double = !IsSingle(Rd);

	Rd = DecodeReg(Rd);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (type << 22) |
	        (imm8 << 13) | (1 << 12) | (imm5 << 5) | Rd);
}

void ARM64FloatEmitter::EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn)
{
	_assert_msg_(immh, "%s bad encoding! Can't have zero immh", __FUNCTION__);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((Q << 30) | (U << 29) | (0xF << 24) | (immh << 19) | (immb << 16) |
	        (opcode << 11) | (1 << 10) | (Rn << 5) | Rd);
}
void ARM64FloatEmitter::EmitScalarShiftImm(bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) {
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	// Scalar shift-by-immediate: 01 U 111110 immh immb opcode 1 Rn Rd.
	// Bit 30 must be set here; (2 << 30) would land on bit 31 instead.
	Write32((1 << 30) | (U << 29) | (0x3E << 23) | (immh << 19) | (immb << 16) | (opcode << 11) | (1 << 10) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitLoadStoreMultipleStructure(u32 size, bool L, u32 opcode, ARM64Reg Rt, ARM64Reg Rn)
{
	bool quad = IsQuad(Rt);
	u32 encoded_size = 0;

	if (size == 16)
		encoded_size = 1;
	else if (size == 32)
		encoded_size = 2;
	else if (size == 64)
		encoded_size = 3;

	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);

	Write32((quad << 30) | (3 << 26) | (L << 22) | (opcode << 12) |
	        (encoded_size << 10) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 opcode, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm)
{
	bool quad = IsQuad(Rt);
	u32 encoded_size = 0;

	if (size == 16)
		encoded_size = 1;
	else if (size == 32)
		encoded_size = 2;
	else if (size == 64)
		encoded_size = 3;

	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((quad << 30) | (0x19 << 23) | (L << 22) | (Rm << 16) | (opcode << 12) |
	        (encoded_size << 10) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn)
{
	_assert_msg_(!IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);

	Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) |
	        (opcode << 15) | (1 << 14) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, bool H, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	bool quad = IsQuad(Rd);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((quad << 30) | (U << 29) | (0xF << 24) | (size << 22) | (L << 21) |
	        (Rm << 16) | (opcode << 12) | (H << 11) | (Rn << 5) | Rd);
}

void ARM64FloatEmitter::EmitLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	_assert_msg_(!(imm < -256 || imm > 255), "%s received too large offset: %d", __FUNCTION__, imm);
	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);

	Write32((size << 30) | (0xF << 26) | (op << 22) | ((imm & 0x1FF) << 12) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	u32 type_encode = 0;
	u32 opc = 0;

	switch (type)
	{
	case INDEX_SIGNED:
		type_encode = 2;
		break;
	case INDEX_POST:
		type_encode = 1;
		break;
	case INDEX_PRE:
		type_encode = 3;
		break;
	case INDEX_UNSIGNED:
		_assert_msg_(false, "%s doesn't support INDEX_UNSIGNED!", __FUNCTION__);
		break;
	}

	if (size == 128)
	{
		_assert_msg_(!(imm & 0xF), "%s received invalid offset 0x%x!", __FUNCTION__, imm);
		opc = 2;
		imm >>= 4;
	}
	else if (size == 64)
	{
		_assert_msg_(!(imm & 0x7), "%s received invalid offset 0x%x!", __FUNCTION__, imm);
		opc = 1;
		imm >>= 3;
	}
	else if (size == 32)
	{
		_assert_msg_(!(imm & 0x3), "%s received invalid offset 0x%x!", __FUNCTION__, imm);
		opc = 0;
		imm >>= 2;
	}

	Rt = DecodeReg(Rt);
	Rt2 = DecodeReg(Rt2);
	Rn = DecodeReg(Rn);

	Write32((opc << 30) | (0xB << 26) | (type_encode << 23) | (load << 22) |
	        ((imm & 0x7F) << 15) | (Rt2 << 10) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	_assert_msg_(Rm.GetType() == ArithOption::TYPE_EXTENDEDREG, "%s must contain an extended reg as Rm!", __FUNCTION__);

	u32 encoded_size = 0;
	u32 encoded_op = 0;

	if (size == 8)
	{
		encoded_size = 0;
		encoded_op = 0;
	}
	else if (size == 16)
	{
		encoded_size = 1;
		encoded_op = 0;
	}
	else if (size == 32)
	{
		encoded_size = 2;
		encoded_op = 0;
	}
	else if (size == 64)
	{
		encoded_size = 3;
		encoded_op = 0;
	}
	else if (size == 128)
	{
		encoded_size = 0;
		encoded_op = 2;
	}

	if (load)
		encoded_op |= 1;

	Rt = DecodeReg(Rt);
	Rn = DecodeReg(Rn);
	ARM64Reg decoded_Rm = DecodeReg(Rm.GetReg());

	Write32((encoded_size << 30) | (encoded_op << 22) | (0x1E1 << 21) | (decoded_Rm << 16) |
	        Rm.GetData() | (1 << 11) | (Rn << 5) | Rt);
}

void ARM64FloatEmitter::LDR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EmitLoadStoreImmediate(size, 1, type, Rt, Rn, imm);
}
void ARM64FloatEmitter::STR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	EmitLoadStoreImmediate(size, 0, type, Rt, Rn, imm);
}

// Loadstore unscaled
void ARM64FloatEmitter::LDUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	u32 encoded_size = 0;
	u32 encoded_op = 0;

	if (size == 8)
	{
		encoded_size = 0;
		encoded_op = 1;
	}
	else if (size == 16)
	{
		encoded_size = 1;
		encoded_op = 1;
	}
	else if (size == 32)
	{
		encoded_size = 2;
		encoded_op = 1;
	}
	else if (size == 64)
	{
		encoded_size = 3;
		encoded_op = 1;
	}
	else if (size == 128)
	{
		encoded_size = 0;
		encoded_op = 3;
	}

	EmitLoadStoreUnscaled(encoded_size, encoded_op, Rt, Rn, imm);
}
void ARM64FloatEmitter::STUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
{
	u32 encoded_size = 0;
	u32 encoded_op = 0;

	if (size == 8)
	{
		encoded_size = 0;
		encoded_op = 0;
	}
	else if (size == 16)
	{
		encoded_size = 1;
		encoded_op = 0;
	}
	else if (size == 32)
	{
		encoded_size = 2;
		encoded_op = 0;
	}
	else if (size == 64)
	{
		encoded_size = 3;
		encoded_op = 0;
	}
	else if (size == 128)
	{
		encoded_size = 0;
		encoded_op = 2;
	}

	EmitLoadStoreUnscaled(encoded_size, encoded_op, Rt, Rn, imm);
}
// Loadstore single structure
void ARM64FloatEmitter::LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn)
{
	bool S = 0;
	u32 opcode = 0;
	u32 encoded_size = 0;
	ARM64Reg encoded_reg = INVALID_REG;

	if (size == 8)
	{
		S = (index & 4) != 0;
		opcode = 0;
		encoded_size = index & 3;
		if (index & 8)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 16)
	{
		S = (index & 2) != 0;
		opcode = 2;
		encoded_size = (index & 1) << 1;
		if (index & 4)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 32)
	{
		S = (index & 1) != 0;
		opcode = 4;
		encoded_size = 0;
		if (index & 2)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 64)
	{
		S = 0;
		opcode = 4;
		encoded_size = 1;
		if (index == 1)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}

	EmitLoadStoreSingleStructure(1, 0, opcode, S, encoded_size, encoded_reg, Rn);
}

void ARM64FloatEmitter::LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm)
{
	bool S = 0;
	u32 opcode = 0;
	u32 encoded_size = 0;
	ARM64Reg encoded_reg = INVALID_REG;

	if (size == 8)
	{
		S = (index & 4) != 0;
		opcode = 0;
		encoded_size = index & 3;
		if (index & 8)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 16)
	{
		S = (index & 2) != 0;
		opcode = 2;
		encoded_size = (index & 1) << 1;
		if (index & 4)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 32)
	{
		S = (index & 1) != 0;
		opcode = 4;
		encoded_size = 0;
		if (index & 2)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 64)
	{
		S = 0;
		opcode = 4;
		encoded_size = 1;
		if (index == 1)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}

	EmitLoadStoreSingleStructure(1, 0, opcode, S, encoded_size, encoded_reg, Rn, Rm);
}

void ARM64FloatEmitter::LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn)
{
	// size is in bits; the structure-load size field wants 0..3 (size >> 4
	// would yield 4 for 64-bit elements and spill into the S bit).
	EmitLoadStoreSingleStructure(1, 0, 6, 0, size == 64 ? 3 : (size >> 4), Rt, Rn);
}
void ARM64FloatEmitter::LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn)
{
	EmitLoadStoreSingleStructure(1, 1, 6, 0, size == 64 ? 3 : (size >> 4), Rt, Rn);
}
void ARM64FloatEmitter::LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitLoadStoreSingleStructure(1, 0, 6, 0, size == 64 ? 3 : (size >> 4), Rt, Rn, Rm);
}
void ARM64FloatEmitter::LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitLoadStoreSingleStructure(1, 1, 6, 0, size == 64 ? 3 : (size >> 4), Rt, Rn, Rm);
}

void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn)
{
	bool S = 0;
	u32 opcode = 0;
	u32 encoded_size = 0;
	ARM64Reg encoded_reg = INVALID_REG;

	if (size == 8)
	{
		S = (index & 4) != 0;
		opcode = 0;
		encoded_size = index & 3;
		if (index & 8)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 16)
	{
		S = (index & 2) != 0;
		opcode = 2;
		encoded_size = (index & 1) << 1;
		if (index & 4)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 32)
	{
		S = (index & 1) != 0;
		opcode = 4;
		encoded_size = 0;
		if (index & 2)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 64)
	{
		S = 0;
		opcode = 4;
		encoded_size = 1;
		if (index == 1)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}

	EmitLoadStoreSingleStructure(0, 0, opcode, S, encoded_size, encoded_reg, Rn);
}

void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm)
{
	bool S = 0;
	u32 opcode = 0;
	u32 encoded_size = 0;
	ARM64Reg encoded_reg = INVALID_REG;

	if (size == 8)
	{
		S = (index & 4) != 0;
		opcode = 0;
		encoded_size = index & 3;
		if (index & 8)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 16)
	{
		S = (index & 2) != 0;
		opcode = 2;
		encoded_size = (index & 1) << 1;
		if (index & 4)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 32)
	{
		S = (index & 1) != 0;
		opcode = 4;
		encoded_size = 0;
		if (index & 2)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}
	else if (size == 64)
	{
		S = 0;
		opcode = 4;
		encoded_size = 1;
		if (index == 1)
			encoded_reg = EncodeRegToQuad(Rt);
		else
			encoded_reg = EncodeRegToDouble(Rt);
	}

	EmitLoadStoreSingleStructure(0, 0, opcode, S, encoded_size, encoded_reg, Rn, Rm);
}
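
// Illustrative sketch, not from the original file: lane-indexed loads/stores
// via the helpers above.
static void Example_LaneAccess(ARM64FloatEmitter &fp)
{
	fp.LD1(32, Q0, 3, X0);  // load 32-bit lane 3 of V0 from [X0]
	fp.ST1(32, Q0, 0, X1);  // store lane 0 of V0 to [X1]
}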

// Loadstore multiple structure
void ARM64FloatEmitter::LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn)
{
	_assert_msg_(!(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", __FUNCTION__);
	u32 opcode = 0;
	if (count == 1)
		opcode = 7;
	else if (count == 2)
		opcode = 0xA;
	else if (count == 3)
		opcode = 6;
	else if (count == 4)
		opcode = 2;
	EmitLoadStoreMultipleStructure(size, 1, opcode, Rt, Rn);
}
void ARM64FloatEmitter::LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", __FUNCTION__);
	_assert_msg_(type == INDEX_POST, "%s only supports post indexing!", __FUNCTION__);

	u32 opcode = 0;
	if (count == 1)
		opcode = 7;
	else if (count == 2)
		opcode = 0xA;
	else if (count == 3)
		opcode = 6;
	else if (count == 4)
		opcode = 2;
	EmitLoadStoreMultipleStructurePost(size, 1, opcode, Rt, Rn, Rm);
}
void ARM64FloatEmitter::ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn)
{
	_assert_msg_(!(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", __FUNCTION__);
	u32 opcode = 0;
	if (count == 1)
		opcode = 7;
	else if (count == 2)
		opcode = 0xA;
	else if (count == 3)
		opcode = 6;
	else if (count == 4)
		opcode = 2;
	EmitLoadStoreMultipleStructure(size, 0, opcode, Rt, Rn);
}
void ARM64FloatEmitter::ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm)
{
	_assert_msg_(!(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", __FUNCTION__);
	_assert_msg_(type == INDEX_POST, "%s only supports post indexing!", __FUNCTION__);

	u32 opcode = 0;
	if (count == 1)
		opcode = 7;
	else if (count == 2)
		opcode = 0xA;
	else if (count == 3)
		opcode = 6;
	else if (count == 4)
		opcode = 2;
	EmitLoadStoreMultipleStructurePost(size, 0, opcode, Rt, Rn, Rm);
}
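
// Illustrative sketch, not from the original file: bulk structure loads.
// Registers are consecutive starting at Rt.
static void Example_BulkLoad(ARM64FloatEmitter &fp)
{
	fp.LD1(8, 4, Q0, X0);                  // LD1 {V0.16B-V3.16B}, [X0]
	fp.ST1(8, 4, INDEX_POST, Q0, X0, X1);  // store the same four, then X0 += X1
}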

// Scalar - 1 Source
void ARM64FloatEmitter::FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top)
{
	if (IsScalar(Rd) && IsScalar(Rn)) {
		EmitScalar1Source(0, 0, IsDouble(Rd), 0, Rd, Rn);
	} else {
		_assert_msg_(!IsQuad(Rd) && !IsQuad(Rn), "FMOV can't move to/from quads");
		int rmode = 0;
		int opcode = 6;
		int sf = 0;
		if (IsSingle(Rd) && !Is64Bit(Rn) && !top) {
			// GPR to scalar single
			opcode |= 1;
		} else if (!Is64Bit(Rd) && IsSingle(Rn) && !top) {
			// Scalar single to GPR - defaults are correct
		} else {
			// TODO
			_assert_msg_(false, "FMOV: Unhandled case");
		}
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);
		Write32((sf << 31) | (0x1e2 << 20) | (rmode << 19) | (opcode << 16) | (Rn << 5) | Rd);
	}
}

// Loadstore paired
void ARM64FloatEmitter::LDP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStorePair(size, true, type, Rt, Rt2, Rn, imm);
}
void ARM64FloatEmitter::STP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
{
	EncodeLoadStorePair(size, false, type, Rt, Rt2, Rn, imm);
}

// Loadstore register offset
void ARM64FloatEmitter::STR(u8 size, ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(size, false, Rt, Rn, Rm);
}
void ARM64FloatEmitter::LDR(u8 size, ARM64Reg Rt, ARM64Reg Rn, const ArithOption &Rm)
{
	EncodeLoadStoreRegisterOffset(size, true, Rt, Rn, Rm);
}

void ARM64FloatEmitter::FABS(ARM64Reg Rd, ARM64Reg Rn)
{
	EmitScalar1Source(0, 0, IsDouble(Rd), 1, Rd, Rn);
}
void ARM64FloatEmitter::FNEG(ARM64Reg Rd, ARM64Reg Rn)
{
	EmitScalar1Source(0, 0, IsDouble(Rd), 2, Rd, Rn);
}
void ARM64FloatEmitter::FSQRT(ARM64Reg Rd, ARM64Reg Rn)
{
	EmitScalar1Source(0, 0, IsDouble(Rd), 3, Rd, Rn);
}

// Scalar - pairwise
void ARM64FloatEmitter::FADDP(ARM64Reg Rd, ARM64Reg Rn) {
	EmitScalarPairwise(1, IsDouble(Rd), 0b01101, Rd, Rn);
}
void ARM64FloatEmitter::FMAXP(ARM64Reg Rd, ARM64Reg Rn) {
	EmitScalarPairwise(1, IsDouble(Rd), 0b01111, Rd, Rn);
}
void ARM64FloatEmitter::FMINP(ARM64Reg Rd, ARM64Reg Rn) {
	EmitScalarPairwise(1, IsDouble(Rd) ? 3 : 2, 0b01111, Rd, Rn);
}
void ARM64FloatEmitter::FMAXNMP(ARM64Reg Rd, ARM64Reg Rn) {
	EmitScalarPairwise(1, IsDouble(Rd), 0b01100, Rd, Rn);
}
void ARM64FloatEmitter::FMINNMP(ARM64Reg Rd, ARM64Reg Rn) {
	EmitScalarPairwise(1, IsDouble(Rd) ? 3 : 2, 0b01100, Rd, Rn);
}
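
// Illustrative sketch, not from the original file: the scalar pairwise ops
// reduce a 2-lane vector, a common final step of a horizontal add.
static void Example_HorizontalAdd(ARM64FloatEmitter &fp)
{
	fp.FADDP(D0, Q1);  // D0 = V1.D[0] + V1.D[1]
}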

// Scalar - 2 Source
void ARM64FloatEmitter::FADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 2, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 0, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 1, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMAX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 4, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMIN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 5, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMAXNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 6, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMINNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 7, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FNMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitScalar2Source(0, 0, IsDouble(Rd), 8, Rd, Rn, Rm);
}

void ARM64FloatEmitter::FMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) {
	EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 0);
}
void ARM64FloatEmitter::FMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) {
	EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 1);
}
void ARM64FloatEmitter::FNMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) {
	EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 2);
}
void ARM64FloatEmitter::FNMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) {
	EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 3);
}

void ARM64FloatEmitter::EmitScalar3Source(bool isDouble, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra, int opcode) {
	int type = isDouble ? 1 : 0;
	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);
	Ra = DecodeReg(Ra);
	int o1 = opcode >> 1;
	int o0 = opcode & 1;
	m_emit->Write32((0x1F << 24) | (type << 22) | (o1 << 21) | (Rm << 16) | (o0 << 15) | (Ra << 10) | (Rn << 5) | Rd);
}
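
// Illustrative sketch, not from the original file: the fused three-source ops.
// FMADD computes Rd = Ra + Rn*Rm with a single rounding step.
static void Example_FusedMultiplyAdd(ARM64FloatEmitter &fp)
{
	fp.FMADD(S0, S1, S2, S3);  // S0 = S3 + S1*S2
	fp.FMSUB(S0, S1, S2, S3);  // S0 = S3 - S1*S2
}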

// Scalar floating point immediate
void ARM64FloatEmitter::FMOV(ARM64Reg Rd, uint8_t imm8)
{
	EmitScalarImm(0, 0, 0, 0, Rd, imm8);
}

// Vector
void ARM64FloatEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, 0, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, 0, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::BSL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, 1, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::BIT(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	EmitThreeSame(1, 2, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::BIF(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	EmitThreeSame(1, 3, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index)
{
	u32 imm5 = 0;

	if (size == 8)
	{
		imm5 = 1;
		imm5 |= index << 1;
	}
	else if (size == 16)
	{
		imm5 = 2;
		imm5 |= index << 2;
	}
	else if (size == 32)
	{
		imm5 = 4;
		imm5 |= index << 3;
	}
	else if (size == 64)
	{
		imm5 = 8;
		imm5 |= index << 4;
	}

	EmitCopy(IsQuad(Rd), 0, imm5, 0, Rd, Rn);
}
void ARM64FloatEmitter::FABS(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0xF, Rd, Rn);
}
void ARM64FloatEmitter::FADD(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, size >> 6, 0x1A, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FADDP(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	EmitThreeSame(1, size >> 6, 0x1A, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, size >> 6, 0x1E, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, size >> 6, 0x19, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, 2 | (size >> 6), 0x1E, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FCVTL(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(false, 0, size >> 6, 0x17, Rd, Rn);
}
void ARM64FloatEmitter::FCVTL2(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(true, 0, size >> 6, 0x17, Rd, Rn);
}
void ARM64FloatEmitter::FCVTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, dest_size >> 5, 0x16, Rd, Rn);
}
void ARM64FloatEmitter::FCVTZS(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0x1B, Rd, Rn);
}
void ARM64FloatEmitter::FCVTZU(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0x1B, Rd, Rn);
}
void ARM64FloatEmitter::FCVTZS(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale) {
	int imm = size * 2 - scale;
	EmitShiftImm(IsQuad(Rd), false, imm >> 3, imm & 7, 0x1F, Rd, Rn);
}
void ARM64FloatEmitter::FCVTZU(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale) {
	int imm = size * 2 - scale;
	EmitShiftImm(IsQuad(Rd), true, imm >> 3, imm & 7, 0x1F, Rd, Rn);
}
void ARM64FloatEmitter::FDIV(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, size >> 6, 0x1F, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, size >> 6, 0x1B, Rd, Rn, Rm);
}
void ARM64FloatEmitter::UMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, EncodeSize(size), 0xD, Rd, Rn, Rm);
}
void ARM64FloatEmitter::UMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, EncodeSize(size), 0xC, Rd, Rn, Rm);
}
void ARM64FloatEmitter::SMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, EncodeSize(size), 0xD, Rd, Rn, Rm);
}
void ARM64FloatEmitter::SMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, EncodeSize(size), 0xC, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FNEG(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0xF, Rd, Rn);
}
void ARM64FloatEmitter::FRSQRTE(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0x1D, Rd, Rn);
}
void ARM64FloatEmitter::FSUB(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, 2 | (size >> 6), 0x1A, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FMLS(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, 2 | (size >> 6), 0x19, Rd, Rn, Rm);
}
void ARM64FloatEmitter::NOT(ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, 0, 5, Rd, Rn);
}
void ARM64FloatEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, 2, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::REV16(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, size >> 4, 1, Rd, Rn);
}
void ARM64FloatEmitter::REV32(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, size >> 4, 0, Rd, Rn);
}
void ARM64FloatEmitter::REV64(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, size >> 4, 0, Rd, Rn);
}
void ARM64FloatEmitter::SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, size >> 6, 0x1D, Rd, Rn);
}
void ARM64FloatEmitter::UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, size >> 6, 0x1D, Rd, Rn);
}
void ARM64FloatEmitter::SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale)
{
	int imm = size * 2 - scale;
	EmitShiftImm(IsQuad(Rd), 0, imm >> 3, imm & 7, 0x1C, Rd, Rn);
}
void ARM64FloatEmitter::UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale)
{
	int imm = size * 2 - scale;
	EmitShiftImm(IsQuad(Rd), 1, imm >> 3, imm & 7, 0x1C, Rd, Rn);
}
void ARM64FloatEmitter::SQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(false, 0, dest_size >> 4, 0x14, Rd, Rn);
}
void ARM64FloatEmitter::SQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(true, 0, dest_size >> 4, 0x14, Rd, Rn);
}
void ARM64FloatEmitter::UQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(false, 1, dest_size >> 4, 0x14, Rd, Rn);
}
void ARM64FloatEmitter::UQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(true, 1, dest_size >> 4, 0x14, Rd, Rn);
}
void ARM64FloatEmitter::XTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(false, 0, dest_size >> 4, 0x12, Rd, Rn);
}
void ARM64FloatEmitter::XTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(true, 0, dest_size >> 4, 0x12, Rd, Rn);
}

// Integer vector compares. These use the vector three-same / two-reg-misc
// encodings, which have no 1D arrangement, so a scalar-double request is
// rejected; 64-bit elements need a size field of 3 (size >> 4 would give 4).
void ARM64FloatEmitter::CMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	EmitThreeSame(true, size == 64 ? 3 : (size >> 4), 0b10001, Rd, Rn, Rm);
}

void ARM64FloatEmitter::CMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	EmitThreeSame(false, size == 64 ? 3 : (size >> 4), 0b00111, Rd, Rn, Rm);
}

void ARM64FloatEmitter::CMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	EmitThreeSame(false, size == 64 ? 3 : (size >> 4), 0b00110, Rd, Rn, Rm);
}

void ARM64FloatEmitter::CMHI(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	EmitThreeSame(true, size == 64 ? 3 : (size >> 4), 0b00110, Rd, Rn, Rm);
}

void ARM64FloatEmitter::CMHS(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	EmitThreeSame(true, size == 64 ? 3 : (size >> 4), 0b00111, Rd, Rn, Rm);
}

void ARM64FloatEmitter::CMTST(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	EmitThreeSame(false, size == 64 ? 3 : (size >> 4), 0b10001, Rd, Rn, Rm);
}

void ARM64FloatEmitter::CMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	Emit2RegMisc(IsQuad(Rd), false, size == 64 ? 3 : (size >> 4), 0b01001, Rd, Rn);
}

void ARM64FloatEmitter::CMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	Emit2RegMisc(IsQuad(Rd), true, size == 64 ? 3 : (size >> 4), 0b01000, Rd, Rn);
}

void ARM64FloatEmitter::CMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	Emit2RegMisc(IsQuad(Rd), false, size == 64 ? 3 : (size >> 4), 0b01000, Rd, Rn);
}

void ARM64FloatEmitter::CMLE(u8 size, ARM64Reg Rd, ARM64Reg Rn) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	Emit2RegMisc(IsQuad(Rd), true, size == 64 ? 3 : (size >> 4), 0b01001, Rd, Rn);
}

void ARM64FloatEmitter::CMLT(u8 size, ARM64Reg Rd, ARM64Reg Rn) {
	_assert_msg_(IsQuad(Rd) || size != 64, "%s cannot be used for scalar double", __FUNCTION__);
	Emit2RegMisc(IsQuad(Rd), false, size == 64 ? 3 : (size >> 4), 0b01010, Rd, Rn);
}

// Move
void ARM64FloatEmitter::DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	u32 imm5 = 0;

	if (size == 8)
		imm5 = 1;
	else if (size == 16)
		imm5 = 2;
	else if (size == 32)
		imm5 = 4;
	else if (size == 64)
		imm5 = 8;

	EmitCopy(IsQuad(Rd), 0, imm5, 1, Rd, Rn);
}
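
// Illustrative sketch, not from the original file: the two DUP forms —
// broadcasting a general register, and broadcasting one lane of a vector
// (the lane form is defined earlier above).
static void Example_Splat(ARM64FloatEmitter &fp)
{
	fp.DUP(32, Q0, W1);       // Q0.4S = broadcast of W1
	fp.DUP(32, Q2, Q3, 2);    // Q2.4S = broadcast of V3.S[2]
	fp.FMUL(32, Q4, Q4, Q2);  // scale every lane by the splatted value
}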
void ARM64FloatEmitter::INS(u8 size, ARM64Reg Rd, u8 index, ARM64Reg Rn)
{
	u32 imm5 = 0;

	if (size == 8)
	{
		imm5 = 1;
		imm5 |= index << 1;
	}
	else if (size == 16)
	{
		imm5 = 2;
		imm5 |= index << 2;
	}
	else if (size == 32)
	{
		imm5 = 4;
		imm5 |= index << 3;
	}
	else if (size == 64)
	{
		imm5 = 8;
		imm5 |= index << 4;
	}

	EmitCopy(1, 0, imm5, 3, Rd, Rn);
}
void ARM64FloatEmitter::INS(u8 size, ARM64Reg Rd, u8 index1, ARM64Reg Rn, u8 index2)
{
	u32 imm5 = 0, imm4 = 0;

	if (size == 8)
	{
		imm5 = 1;
		imm5 |= index1 << 1;
		imm4 = index2;
	}
	else if (size == 16)
	{
		imm5 = 2;
		imm5 |= index1 << 2;
		imm4 = index2 << 1;
	}
	else if (size == 32)
	{
		imm5 = 4;
		imm5 |= index1 << 3;
		imm4 = index2 << 2;
	}
	else if (size == 64)
	{
		imm5 = 8;
		imm5 |= index1 << 4;
		imm4 = index2 << 3;
	}

	EmitCopy(1, 1, imm5, imm4, Rd, Rn);
}

void ARM64FloatEmitter::UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index)
{
	bool b64Bit = Is64Bit(Rd);
	_assert_msg_(Rd < SP, "%s destination must be a GPR!", __FUNCTION__);
	_assert_msg_(!(b64Bit && size != 64), "%s must have a size of 64 when destination is 64bit!", __FUNCTION__);
	u32 imm5 = 0;

	if (size == 8)
	{
		imm5 = 1;
		imm5 |= index << 1;
	}
	else if (size == 16)
	{
		imm5 = 2;
		imm5 |= index << 2;
	}
	else if (size == 32)
	{
		imm5 = 4;
		imm5 |= index << 3;
	}
	else if (size == 64)
	{
		imm5 = 8;
		imm5 |= index << 4;
	}

	EmitCopy(b64Bit, 0, imm5, 7, Rd, Rn);
}
void ARM64FloatEmitter::SMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index)
{
	bool b64Bit = Is64Bit(Rd);
	_assert_msg_(Rd < SP, "%s destination must be a GPR!", __FUNCTION__);
	_assert_msg_(size != 64, "%s doesn't support 64bit destination. Use UMOV!", __FUNCTION__);
	u32 imm5 = 0;

	if (size == 8)
	{
		imm5 = 1;
		imm5 |= index << 1;
	}
	else if (size == 16)
	{
		imm5 = 2;
		imm5 |= index << 2;
	}
	else if (size == 32)
	{
		imm5 = 4;
		imm5 |= index << 3;
	}

	EmitCopy(b64Bit, 0, imm5, 5, Rd, Rn);
}

void ARM64FloatEmitter::EncodeModImm(bool Q, u8 op, u8 cmode, u8 o2, ARM64Reg Rd, u8 abcdefgh) {
	Rd = DecodeReg(Rd);
	u8 abc = abcdefgh >> 5;
	u8 defgh = abcdefgh & 0x1F;
	Write32((Q << 30) | (op << 29) | (0xF << 24) | (abc << 16) | (cmode << 12) | (o2 << 11) | (1 << 10) | (defgh << 5) | Rd);
}

void ARM64FloatEmitter::FMOV(u8 size, ARM64Reg Rd, u8 imm8) {
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles", __FUNCTION__);
	_assert_msg_(size == 32 || size == 64, "%s: unsupported size", __FUNCTION__);
	_assert_msg_(IsQuad(Rd) || size == 32, "Use non-SIMD FMOV to load one double imm8");
	EncodeModImm(IsQuad(Rd), size >> 6, 0b1111, 0, Rd, imm8);
}

void ARM64FloatEmitter::MOVI(u8 size, ARM64Reg Rd, u8 imm8, u8 shift, bool MSL) {
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles", __FUNCTION__);
	_assert_msg_(size == 8 || size == 16 || size == 32 || size == 64, "%s: unsupported size %d", __FUNCTION__, size);
	_assert_msg_((shift & 7) == 0 && shift < size, "%s: unsupported shift %d", __FUNCTION__, shift);
	_assert_msg_(!MSL || (size == 32 && shift > 0 && shift <= 16), "MOVI MSL shift requires size 32, shift must be 8 or 16");
	_assert_msg_(size != 64 || shift == 0, "MOVI 64-bit imm cannot be shifted");

	u8 cmode = 0;
	if (size == 8)
		cmode = 0b1110;
	else if (size == 16)
		cmode = 0b1000 | (shift >> 2);
	else if (MSL)
		cmode = 0b1100 | (shift >> 4);  // MSL: cmode 110x, x selects shift 8 vs 16
	else if (size == 32)
		cmode = (shift >> 2);
	else if (size == 64)
		cmode = 0b1110;
	else
		_assert_msg_(false, "%s: unhandled case", __FUNCTION__);

	EncodeModImm(IsQuad(Rd), size >> 6, cmode, 0, Rd, imm8);
}
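
// Illustrative sketch, not from the original file: building vector constants
// with the modified-immediate forms (shift and MSL defaults assumed from the
// header declaration).
static void Example_VectorConstants(ARM64FloatEmitter &fp)
{
	fp.MOVI(8, Q0, 0xFF);            // every byte = 0xFF
	fp.MOVI(32, Q0, 0x7F, 24);       // every u32 = 0x7F000000
	fp.MOVI(32, Q0, 0xFF, 8, true);  // MSL: every u32 = 0x0000FFFF
}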

void ARM64FloatEmitter::MVNI(u8 size, ARM64Reg Rd, u8 imm8, u8 shift, bool MSL) {
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles", __FUNCTION__);
	_assert_msg_(size == 16 || size == 32, "%s: unsupported size %d", __FUNCTION__, size);
	_assert_msg_((shift & 7) == 0 && shift < size, "%s: unsupported shift %d", __FUNCTION__, shift);
	_assert_msg_(!MSL || (size == 32 && shift > 0 && shift <= 16), "MVNI MSL shift requires size 32, shift must be 8 or 16");

	u8 cmode = 0;
	if (size == 16)
		cmode = 0b1000 | (shift >> 2);
	else if (MSL)
		cmode = 0b1100 | (shift >> 4);  // MSL: cmode 110x, x selects shift 8 vs 16
	else if (size == 32)
		cmode = (shift >> 2);
	else
		_assert_msg_(false, "%s: unhandled case", __FUNCTION__);

	EncodeModImm(IsQuad(Rd), 1, cmode, 0, Rd, imm8);
}
3494
3495
void ARM64FloatEmitter::ORR(u8 size, ARM64Reg Rd, u8 imm8, u8 shift) {
3496
_assert_msg_(!IsSingle(Rd), "%s doesn't support singles", __FUNCTION__);
3497
_assert_msg_(size == 16 || size == 32, "%s: unsupported size %d", __FUNCTION__, size);
3498
_assert_msg_((shift & 7) == 0 && shift < size, "%s: unsupported shift %d", __FUNCTION__, shift);
3499
3500
u8 cmode = 0;
3501
if (size == 16)
3502
cmode = 0b1001 | (shift >> 2);
3503
else if (size == 32)
3504
cmode = 0b0001 | (shift >> 2);
3505
else
3506
_assert_msg_(false, "%s: unhandled case", __FUNCTION__);
3507
3508
EncodeModImm(IsQuad(Rd), 0, cmode, 0, Rd, imm8);
3509
}
3510
3511
void ARM64FloatEmitter::BIC(u8 size, ARM64Reg Rd, u8 imm8, u8 shift) {
3512
_assert_msg_(!IsSingle(Rd), "%s doesn't support singles", __FUNCTION__);
3513
_assert_msg_(size == 16 || size == 32, "%s: unsupported size %d", __FUNCTION__, size);
3514
_assert_msg_((shift & 7) == 0 && shift < size, "%s: unsupported shift %d", __FUNCTION__, shift);
3515
3516
u8 cmode = 0;
3517
if (size == 16)
3518
cmode = 0b1001 | (shift >> 2);
3519
else if (size == 32)
3520
cmode = 0b0001 | (shift >> 2);
3521
else
3522
_assert_msg_(false, "%s: unhandled case", __FUNCTION__);
3523
3524
EncodeModImm(IsQuad(Rd), 1, cmode, 0, Rd, imm8);
3525
}
3526
3527
// One source
3528
void ARM64FloatEmitter::FCVT(u8 size_to, u8 size_from, ARM64Reg Rd, ARM64Reg Rn)
3529
{
3530
u32 dst_encoding = 0;
3531
u32 src_encoding = 0;
3532
3533
if (size_to == 16)
3534
dst_encoding = 3;
3535
else if (size_to == 32)
3536
dst_encoding = 0;
3537
else if (size_to == 64)
3538
dst_encoding = 1;
3539
3540
if (size_from == 16)
3541
src_encoding = 3;
3542
else if (size_from == 32)
3543
src_encoding = 0;
3544
else if (size_from == 64)
3545
src_encoding = 1;
3546
3547
Emit1Source(0, 0, src_encoding, 4 | dst_encoding, Rd, Rn);
3548
}
3549

void ARM64FloatEmitter::SCVTF(ARM64Reg Rd, ARM64Reg Rn)
{
	if (IsScalar(Rn)) {
		// Source is in FP register (like destination!). We must use a vector encoding.
		// Note: 'sign' is the encoding's U bit, which stays clear for the signed variant.
		bool sign = false;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);
		int sz = IsDouble(Rn);
		Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (Rn << 5) | Rd);
	} else {
		bool sf = Is64Bit(Rn);
		u32 type = 0;
		if (IsDouble(Rd))
			type = 1;
		EmitConversion(sf, 0, type, 0, 2, Rd, Rn);
	}
}

void ARM64FloatEmitter::UCVTF(ARM64Reg Rd, ARM64Reg Rn)
{
	if (IsScalar(Rn)) {
		// Source is in FP register (like destination!). We must use a vector encoding.
		// Note: 'sign' is the encoding's U bit, set here for the unsigned variant.
		bool sign = true;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);
		int sz = IsDouble(Rn);
		Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (Rn << 5) | Rd);
	} else {
		bool sf = Is64Bit(Rn);
		u32 type = 0;
		if (IsDouble(Rd))
			type = 1;

		EmitConversion(sf, 0, type, 0, 3, Rd, Rn);
	}
}
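
// Note on the two paths above: with a scalar FP source (S/D register) the
// int-to-float conversion happens entirely inside the SIMD/FP register file;
// with a GPR source the GPR variant is emitted instead. For example:
//   fp.SCVTF(S0, W0);  // signed int in W0 -> float in S0
//   fp.SCVTF(S0, S0);  // int bits already in S0 -> float in S0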

void ARM64FloatEmitter::SCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale)
{
	if (IsScalar(Rn)) {
		int imm = (IsDouble(Rn) ? 64 : 32) * 2 - scale;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);

		Write32((1 << 30) | (0 << 29) | (0x1F << 24) | (imm << 16) | (0x1C << 11) | (1 << 10) | (Rn << 5) | Rd);
	} else {
		bool sf = Is64Bit(Rn);
		u32 type = 0;
		if (IsDouble(Rd))
			type = 1;

		EmitConversion2(sf, 0, false, type, 0, 2, 64 - scale, Rd, Rn);
	}
}

void ARM64FloatEmitter::UCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale)
{
	if (IsScalar(Rn)) {
		int imm = (IsDouble(Rn) ? 64 : 32) * 2 - scale;
		Rd = DecodeReg(Rd);
		Rn = DecodeReg(Rn);

		Write32((1 << 30) | (1 << 29) | (0x1F << 24) | (imm << 16) | (0x1C << 11) | (1 << 10) | (Rn << 5) | Rd);
	} else {
		bool sf = Is64Bit(Rn);
		u32 type = 0;
		if (IsDouble(Rd))
			type = 1;

		EmitConversion2(sf, 0, false, type, 0, 3, 64 - scale, Rd, Rn);
	}
}
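
// The 'scale' variants above are fixed-point conversions: the integer source is
// treated as having 'scale' fractional bits, so the result is value / 2^scale.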

void ARM64FloatEmitter::FCMP(ARM64Reg Rn, ARM64Reg Rm)
{
	EmitCompare(0, 0, 0, 0, Rn, Rm);
}
void ARM64FloatEmitter::FCMP(ARM64Reg Rn)
{
	EmitCompare(0, 0, 0, 8, Rn, (ARM64Reg)0);
}
void ARM64FloatEmitter::FCMPE(ARM64Reg Rn, ARM64Reg Rm)
{
	EmitCompare(0, 0, 0, 0x10, Rn, Rm);
}
void ARM64FloatEmitter::FCMPE(ARM64Reg Rn)
{
	EmitCompare(0, 0, 0, 0x18, Rn, (ARM64Reg)0);
}
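
// The single-operand FCMP/FCMPE forms compare Rn against +0.0. The 'E' variants
// also signal an Invalid Operation exception for quiet NaN operands, where
// plain FCMP only does so for signaling NaNs.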
void ARM64FloatEmitter::FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(0, size >> 6, 0x1C, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0xD, Rd, Rn);
}
void ARM64FloatEmitter::FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, size >> 6, 0x1C, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0xC, Rd, Rn);
}
void ARM64FloatEmitter::FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitThreeSame(1, 2 | (size >> 6), 0x1C, Rd, Rn, Rm);
}
void ARM64FloatEmitter::FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0x0C, Rd, Rn);
}
void ARM64FloatEmitter::FCMLE(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0xD, Rd, Rn);
}
void ARM64FloatEmitter::FCMLT(u8 size, ARM64Reg Rd, ARM64Reg Rn)
{
	Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0xE, Rd, Rn);
}
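
// The single-operand FCM* forms above compare each lane against zero. FCMLE and
// FCMLT only exist as zero comparisons; a two-register less-than is done by
// swapping the operands of FCMGE/FCMGT.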

void ARM64FloatEmitter::FCSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{
	EmitCondSelect(0, 0, cond, Rd, Rn, Rm);
}

void ARM64FloatEmitter::FCCMP(ARM64Reg Rn, ARM64Reg Rm, u8 nzcv, CCFlags cond) {
	EmitCondCompare(0, 0, cond, 0, nzcv, Rn, Rm);
}

void ARM64FloatEmitter::FCCMPE(ARM64Reg Rn, ARM64Reg Rm, u8 nzcv, CCFlags cond) {
	EmitCondCompare(0, 0, cond, 1, nzcv, Rn, Rm);
}
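
// Example (sketch): branchless float select using the flags from an FCMP.
//   fp.FCMP(S1, S2);
//   fp.FCSEL(S0, S3, S4, CC_GT);  // S0 = (S1 > S2) ? S3 : S4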

// Permute
void ARM64FloatEmitter::UZP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitPermute(size, 1, Rd, Rn, Rm);
}
void ARM64FloatEmitter::TRN1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitPermute(size, 2, Rd, Rn, Rm);
}
void ARM64FloatEmitter::ZIP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitPermute(size, 3, Rd, Rn, Rm);
}
void ARM64FloatEmitter::UZP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitPermute(size, 5, Rd, Rn, Rm);
}
void ARM64FloatEmitter::TRN2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitPermute(size, 6, Rd, Rn, Rm);
}
void ARM64FloatEmitter::ZIP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
	EmitPermute(size, 7, Rd, Rn, Rm);
}
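
// ZIP1/ZIP2 interleave the low/high halves of the two sources, UZP1/UZP2
// de-interleave the even/odd elements, and TRN1/TRN2 transpose element pairs.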

void ARM64FloatEmitter::EXT(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, int index) {
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__);

	bool quad = IsQuad(Rd);
	_assert_msg_(index >= 0 && index < 16 && (quad || index < 8), "%s start index out of bounds", __FUNCTION__);
	_assert_msg_(IsQuad(Rd) == IsQuad(Rn) && IsQuad(Rd) == IsQuad(Rm), "%s operands not same size", __FUNCTION__);

	Rd = DecodeReg(Rd);
	Rn = DecodeReg(Rn);
	Rm = DecodeReg(Rm);

	Write32((quad << 30) | (0x17 << 25) | (Rm << 16) | (index << 11) | (Rn << 5) | Rd);
}
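
// EXT is effectively a byte-granular funnel shift: the result takes the bytes
// of Rn starting at 'index', followed by the low bytes of Rm.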

// Shift by immediate
void ARM64FloatEmitter::SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
{
	SSHLL(src_size, Rd, Rn, shift, false);
}
void ARM64FloatEmitter::SSHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
{
	SSHLL(src_size, Rd, Rn, shift, true);
}
void ARM64FloatEmitter::SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
{
	SHRN(dest_size, Rd, Rn, shift, false);
}
void ARM64FloatEmitter::SHRN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
{
	SHRN(dest_size, Rd, Rn, shift, true);
}
void ARM64FloatEmitter::USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
{
	USHLL(src_size, Rd, Rn, shift, false);
}
void ARM64FloatEmitter::USHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
{
	USHLL(src_size, Rd, Rn, shift, true);
}
void ARM64FloatEmitter::SHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn) {
	SHLL(src_size, Rd, Rn, false);
}
void ARM64FloatEmitter::SHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn) {
	SHLL(src_size, Rd, Rn, true);
}
void ARM64FloatEmitter::SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
{
	SXTL(src_size, Rd, Rn, false);
}
void ARM64FloatEmitter::SXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
{
	SXTL(src_size, Rd, Rn, true);
}
void ARM64FloatEmitter::UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
{
	UXTL(src_size, Rd, Rn, false);
}
void ARM64FloatEmitter::UXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
{
	UXTL(src_size, Rd, Rn, true);
}

static u32 EncodeImmShiftLeft(u8 src_size, u32 shift) {
	return src_size + shift;
}

static u32 EncodeImmShiftRight(u8 src_size, u32 shift) {
	return src_size * 2 - shift;
}
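
// These implement the standard immh:immb scheme for vector shifts: left shifts
// encode size + shift, right shifts encode size * 2 - shift, so the position of
// the highest set bit of the result also identifies the element size.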

void ARM64FloatEmitter::SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper)
{
	_assert_msg_(shift < src_size, "%s shift amount must be less than the element size!", __FUNCTION__);
	u32 imm = EncodeImmShiftLeft(src_size, shift);
	EmitShiftImm(upper, 0, imm >> 3, imm & 7, 0x14, Rd, Rn);
}

void ARM64FloatEmitter::USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper)
{
	_assert_msg_(shift < src_size, "%s shift amount must be less than the element size!", __FUNCTION__);
	u32 imm = EncodeImmShiftLeft(src_size, shift);
	EmitShiftImm(upper, 1, imm >> 3, imm & 7, 0x14, Rd, Rn);
}

void ARM64FloatEmitter::SHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper) {
	_assert_msg_(src_size <= 32, "%s source element size must be 8, 16 or 32", __FUNCTION__);
	Emit2RegMisc(upper, 1, src_size >> 4, 0b10011, Rd, Rn);
}

void ARM64FloatEmitter::SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper)
{
	_assert_msg_(shift > 0, "%s shift amount must be greater than zero!", __FUNCTION__);
	_assert_msg_(shift <= dest_size, "%s shift amount must be less than or equal to the element size!", __FUNCTION__);
	u32 imm = EncodeImmShiftRight(dest_size, shift);
	EmitShiftImm(upper, 0, imm >> 3, imm & 7, 0x10, Rd, Rn);
}

void ARM64FloatEmitter::SHL(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift) {
	_assert_msg_(shift < dest_size, "%s shift amount must be less than the element size!", __FUNCTION__);
	u32 imm = EncodeImmShiftLeft(dest_size, shift);
	EmitShiftImm(IsQuad(Rd), false, imm >> 3, imm & 7, 0xA, Rd, Rn);
}

void ARM64FloatEmitter::USHR(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift) {
	_assert_msg_(shift < dest_size, "%s shift amount must be less than the element size!", __FUNCTION__);
	u32 imm = EncodeImmShiftRight(dest_size, shift);
	EmitShiftImm(IsQuad(Rd), true, imm >> 3, imm & 7, 0x0, Rd, Rn);
}

void ARM64FloatEmitter::SSHR(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift) {
	_assert_msg_(shift < dest_size, "%s shift amount must be less than the element size!", __FUNCTION__);
	u32 imm = EncodeImmShiftRight(dest_size, shift);
	EmitShiftImm(IsQuad(Rd), false, imm >> 3, imm & 7, 0x0, Rd, Rn);
}

void ARM64FloatEmitter::SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper)
{
	SSHLL(src_size, Rd, Rn, 0, upper);
}

void ARM64FloatEmitter::UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper)
{
	USHLL(src_size, Rd, Rn, 0, upper);
}
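
// Example (sketch): widen the eight u8 lanes in D0 to u16 lanes in Q0.
//   fp.UXTL(8, Q0, D0);  // zero-extends; SXTL(8, Q0, D0) would sign-extend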

// vector x indexed element
void ARM64FloatEmitter::FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index)
{
	_assert_msg_(size == 32 || size == 64, "%s only supports 32bit or 64bit size!", __FUNCTION__);

	bool L = false;
	bool H = false;
	if (size == 32) {
		L = index & 1;
		H = (index >> 1) & 1;
	} else if (size == 64) {
		H = index == 1;
	}

	EmitVectorxElement(0, 2 | (size >> 6), L, 0x9, H, Rd, Rn, Rm);
}

void ARM64FloatEmitter::FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index)
{
	_assert_msg_(size == 32 || size == 64, "%s only supports 32bit or 64bit size!", __FUNCTION__);

	bool L = false;
	bool H = false;
	if (size == 32) {
		L = index & 1;
		H = (index >> 1) & 1;
	} else if (size == 64) {
		H = index == 1;
	}

	EmitVectorxElement(0, 2 | (size >> 6), L, 1, H, Rd, Rn, Rm);
}
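
// In the by-element forms above, H:L hold the lane index of Rm: 32-bit lanes
// use both bits, 64-bit lanes use only H.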

void ARM64FloatEmitter::ABI_PushRegisters(uint32_t registers, uint32_t fp_registers) {
	_assert_msg_((registers & 0x60000000) == 0, "ABI_PushRegisters: Do not include FP and LR, those are handled unconditionally");

	ARM64Reg gprs[32]{}, fprs[32]{};
	int num_gprs = 0, num_fprs = 0;
	for (int i = 0; i < 29; i++) {
		if (registers & (1U << i))
			gprs[num_gprs++] = (ARM64Reg)(X0 + i);
	}

	for (int i = 0; i < 32; i++) {
		if (fp_registers & (1U << i))
			fprs[num_fprs++] = (ARM64Reg)(D0 + i);
	}

	u32 stack_size = 16 + ROUND_UP(num_gprs * 8, 16) + ROUND_UP(num_fprs * 8, 16);

	// Stack is required to be quad-word aligned.
	if (stack_size < 256) {
		m_emit->STP(INDEX_PRE, FP, LR, SP, -(s32)stack_size);
	} else {
		m_emit->SUB(SP, SP, stack_size);
		m_emit->STP(INDEX_UNSIGNED, FP, LR, SP, 0);
	}
	m_emit->MOVfromSP(X29);  // Set new frame pointer
	int offset = 16;
	for (int i = 0; i < num_gprs / 2; i++) {
		m_emit->STP(INDEX_SIGNED, gprs[i * 2], gprs[i * 2 + 1], X29, offset);
		offset += 16;
	}
	if (num_gprs & 1) {
		m_emit->STR(INDEX_UNSIGNED, gprs[num_gprs - 1], X29, offset);
		offset += 16;
	}

	for (int i = 0; i < num_fprs / 2; i++) {
		STP(64, INDEX_SIGNED, fprs[i * 2], fprs[i * 2 + 1], SP, offset);
		offset += 16;
	}
	if (num_fprs & 1) {
		STR(64, INDEX_UNSIGNED, fprs[num_fprs - 1], X29, offset);
		offset += 16;
	}
	// Now offset should be == stack_size.
}
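
// Resulting frame layout (sketch), offsets relative to the new FP (== SP):
//   [0]    FP, LR
//   [16]   saved GPRs, stored in pairs, padded to 16 bytes
//   [...]  saved FPRs, stored in pairs, padded to 16 bytes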

void ARM64FloatEmitter::ABI_PopRegisters(uint32_t registers, uint32_t fp_registers) {
	ARM64Reg gprs[32]{}, fprs[32]{};
	int num_gprs = 0, num_fprs = 0;
	for (int i = 0; i < 29; i++) {
		if (registers & (1U << i))
			gprs[num_gprs++] = (ARM64Reg)(X0 + i);
	}

	for (int i = 0; i < 32; i++) {
		if (fp_registers & (1U << i))
			fprs[num_fprs++] = (ARM64Reg)(D0 + i);
	}

	u32 stack_size = 16 + ROUND_UP(num_gprs * 8, 16) + ROUND_UP(num_fprs * 8, 16);

	// SP points to the bottom. We're gonna walk it upwards.
	// Reload FP, LR.
	m_emit->LDP(INDEX_SIGNED, FP, LR, SP, 0);
	int offset = 16;
	for (int i = 0; i < num_gprs / 2; i++) {
		m_emit->LDP(INDEX_SIGNED, gprs[i * 2], gprs[i * 2 + 1], SP, offset);
		offset += 16;
	}
	// Do the straggler.
	if (num_gprs & 1) {
		m_emit->LDR(INDEX_UNSIGNED, gprs[num_gprs - 1], SP, offset);
		offset += 16;
	}

	// Time for the FP regs.
	for (int i = 0; i < num_fprs / 2; i++) {
		LDP(64, INDEX_SIGNED, fprs[i * 2], fprs[i * 2 + 1], SP, offset);
		offset += 16;
	}
	// Do the straggler.
	if (num_fprs & 1) {
		LDR(64, INDEX_UNSIGNED, fprs[num_fprs - 1], SP, offset);
		offset += 16;
	}
	// Now offset should be == stack_size.

	// Restore the stack pointer.
	m_emit->ADD(SP, SP, stack_size);
}

void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	// It's probably okay to AND by extra bits.
	if (!Is64Bit(Rn))
		imm &= 0xFFFFFFFF;
	if (!TryANDI2R(Rd, Rn, imm)) {
		_assert_msg_(scratch != INVALID_REG, "ANDI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		AND(Rd, Rn, scratch);
	}
}

void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	_assert_msg_(Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "ORRI2R - more bits in imm than Rn");
	if (!TryORRI2R(Rd, Rn, imm)) {
		_assert_msg_(scratch != INVALID_REG, "ORRI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		ORR(Rd, Rn, scratch);
	}
}

void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	_assert_msg_(Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "EORI2R - more bits in imm than Rn");
	if (!TryEORI2R(Rd, Rn, imm)) {
		_assert_msg_(scratch != INVALID_REG, "EORI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		EOR(Rd, Rn, scratch);
	}
}
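
// Example (sketch, 'emit' being an ARM64XEmitter): the immediate is encoded
// directly when it's a valid logical immediate, otherwise it's materialized in
// the scratch register first.
//   emit.ANDI2R(X0, X0, 0x00FF00FF00FF00FF, X1);  // encodable, X1 unused
//   emit.ANDI2R(X0, X0, 0x123456789, X1);         // not encodable, uses X1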

void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	if (!Is64Bit(Rn))
		imm &= 0xFFFFFFFF;
	unsigned int n, imm_s, imm_r;
	if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
		ANDS(Rd, Rn, imm_r, imm_s, n != 0);
	} else if (imm == 0) {
		ANDS(Rd, Rn, Is64Bit(Rn) ? ZR : WZR);
	} else {
		_assert_msg_(scratch != INVALID_REG, "ANDSI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		ANDS(Rd, Rn, scratch);
	}
}

void ARM64XEmitter::ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	if (!TryADDI2R(Rd, Rn, imm)) {
		_assert_msg_(scratch != INVALID_REG, "ADDI2R - failed to construct arithmetic immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		ADD(Rd, Rn, scratch);
	}
}

void ARM64XEmitter::SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	if (!TrySUBI2R(Rd, Rn, imm)) {
		_assert_msg_(scratch != INVALID_REG, "SUBI2R - failed to construct arithmetic immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		SUB(Rd, Rn, scratch);
	}
}

void ARM64XEmitter::CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	if (!TryCMPI2R(Rn, imm)) {
		_assert_msg_(scratch != INVALID_REG, "CMPI2R - failed to construct arithmetic immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		CMP(Rn, scratch);
	}
}

bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
	s64 negated = Is64Bit(Rn) ? -(s64)imm : -(s32)(u32)imm;
	u32 val;
	bool shift;
	if (imm == 0) {
		// Prefer MOV (ORR) instead of ADD for moves.
		MOV(Rd, Rn);
		return true;
	} else if (IsImmArithmetic(imm, &val, &shift)) {
		ADD(Rd, Rn, val, shift);
		return true;
	} else if (IsImmArithmetic((u64)negated, &val, &shift)) {
		SUB(Rd, Rn, val, shift);
		return true;
	} else {
		return false;
	}
}

bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
	s64 negated = Is64Bit(Rn) ? -(s64)imm : -(s32)(u32)imm;
	u32 val;
	bool shift;
	if (imm == 0) {
		// Prefer MOV (ORR) instead of SUB for moves.
		MOV(Rd, Rn);
		return true;
	} else if (IsImmArithmetic(imm, &val, &shift)) {
		SUB(Rd, Rn, val, shift);
		return true;
	} else if (IsImmArithmetic((u64)negated, &val, &shift)) {
		ADD(Rd, Rn, val, shift);
		return true;
	} else {
		return false;
	}
}

bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u64 imm) {
	s64 negated = Is64Bit(Rn) ? -(s64)imm : -(s32)(u32)imm;
	u32 val;
	bool shift;
	if (IsImmArithmetic(imm, &val, &shift)) {
		CMP(Rn, val, shift);
		return true;
	} else if (IsImmArithmetic((u64)negated, &val, &shift)) {
		CMN(Rn, val, shift);
		return true;
	} else {
		return false;
	}
}
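
// The Try* helpers above rely on ADD/SUB (and CMP/CMN) being duals: when an
// immediate doesn't fit the 12-bit optionally-shifted field, its negation
// might, in which case the opposite instruction is emitted instead.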

bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
	if (!Is64Bit(Rn))
		imm &= 0xFFFFFFFF;
	u32 n, imm_r, imm_s;
	if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
		AND(Rd, Rn, imm_r, imm_s, n != 0);
		return true;
	} else if (imm == 0) {
		MOVI2R(Rd, 0);
		return true;
	} else {
		return false;
	}
}
bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
	_assert_msg_(Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "TryORRI2R - more bits in imm than Rn");
	u32 n, imm_r, imm_s;
	if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
		ORR(Rd, Rn, imm_r, imm_s, n != 0);
		return true;
	} else if (imm == 0) {
		if (Rd != Rn) {
			MOV(Rd, Rn);
		}
		return true;
	} else {
		return false;
	}
}
bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) {
	_assert_msg_(Is64Bit(Rn) || (imm & 0xFFFFFFFF00000000UL) == 0, "TryEORI2R - more bits in imm than Rn");
	u32 n, imm_r, imm_s;
	if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) {
		EOR(Rd, Rn, imm_r, imm_s, n != 0);
		return true;
	} else if (imm == 0) {
		if (Rd != Rn) {
			MOV(Rd, Rn);
		}
		return true;
	} else {
		return false;
	}
}

float FPImm8ToFloat(uint8_t bits) {
	int sign = bits >> 7;
	uint32_t f = 0;
	f |= (sign << 31);
	int bit6 = (bits >> 6) & 1;
	uint32_t exp = ((!bit6) << 7) | (0x7C * bit6) | ((bits >> 4) & 3);
	uint32_t mantissa = (bits & 0xF) << 19;
	f |= exp << 23;
	f |= mantissa;
	float fl;
	memcpy(&fl, &f, sizeof(float));
	return fl;
}

bool FPImm8FromFloat(float value, uint8_t *immOut) {
	uint32_t f;
	memcpy(&f, &value, sizeof(float));
	uint32_t mantissa4 = (f & 0x7FFFFF) >> 19;
	uint32_t exponent = (f >> 23) & 0xFF;
	uint32_t sign = f >> 31;
	if ((exponent >> 7) == ((exponent >> 6) & 1))
		return false;
	uint8_t imm8 = (sign << 7) | ((!(exponent >> 7)) << 6) | ((exponent & 3) << 4) | mantissa4;
	float newFloat = FPImm8ToFloat(imm8);
	if (newFloat == value) {
		*immOut = imm8;
		return true;
	} else {
		return false;
	}
}
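
// The 8-bit FP immediate above packs sign, a 3-bit exponent and a 4-bit
// mantissa; it can represent +/- (16..31)/16 * 2^n for n in [-3, 4]
// (e.g. 1.0, 0.5 or 31.0, but notably not 0.0).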

void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool negate) {
	_assert_msg_(!IsDouble(Rd), "MOVI2F does not yet support double precision");
	uint8_t imm8;
	if (value == 0.0) {
		if (std::signbit(value)) {
			negate = !negate;
		}
		FMOV(Rd, IsDouble(Rd) ? ZR : WZR);
		if (negate) {
			FNEG(Rd, Rd);
		}
		// TODO: There are some other values we could generate with the float-imm instruction, like 1.0...
	} else if (negate && FPImm8FromFloat(-value, &imm8)) {
		FMOV(Rd, imm8);
	} else if (FPImm8FromFloat(value, &imm8)) {
		FMOV(Rd, imm8);
		if (negate) {
			FNEG(Rd, Rd);
		}
	} else {
		_assert_msg_(scratch != INVALID_REG, "Failed to find a way to generate FP immediate %f without scratch", value);
		u32 ival;
		if (negate) {
			value = -value;
		}
		memcpy(&ival, &value, sizeof(ival));
		m_emit->MOVI2R(scratch, ival);
		FMOV(Rd, scratch);
	}
}

// TODO: Quite a few values could be generated easily using the MOVI instruction and friends.
void ARM64FloatEmitter::MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch, bool negate) {
	_assert_msg_(!IsSingle(Rd), "%s doesn't support singles", __FUNCTION__);
	int ival;
	memcpy(&ival, &value, 4);
	uint8_t imm8;
	if (ival == 0) {  // Make sure to not catch negative zero here
		// Prefer MOVI 0, which may have no latency on some CPUs.
		MOVI(32, Rd, 0);
		if (negate)
			FNEG(32, Rd, Rd);
	} else if (negate && FPImm8FromFloat(-value, &imm8)) {
		FMOV(32, Rd, imm8);
	} else if (FPImm8FromFloat(value, &imm8)) {
		FMOV(32, Rd, imm8);
		if (negate) {
			FNEG(32, Rd, Rd);
		}
	} else if (TryAnyMOVI(32, Rd, ival)) {
		if (negate) {
			FNEG(32, Rd, Rd);
		}
	} else if (TryAnyMOVI(32, Rd, ival ^ 0x80000000)) {
		if (!negate) {
			FNEG(32, Rd, Rd);
		}
	} else {
		_assert_msg_(scratch != INVALID_REG, "Failed to find a way to generate FP immediate %f without scratch", value);
		if (negate) {
			ival ^= 0x80000000;
		}
		m_emit->MOVI2R(scratch, ival);
		DUP(32, Rd, scratch);
	}
}

bool ARM64FloatEmitter::TryMOVI(u8 size, ARM64Reg Rd, uint64_t elementValue) {
	if (size == 8) {
		// Can always do 8.
		MOVI(size, Rd, elementValue & 0xFF);
		return true;
	} else if (size == 16) {
		if ((elementValue & 0xFF00) == 0) {
			MOVI(size, Rd, elementValue & 0xFF, 0);
			return true;
		} else if ((elementValue & 0x00FF) == 0) {
			MOVI(size, Rd, (elementValue >> 8) & 0xFF, 8);
			return true;
		} else if ((elementValue & 0xFF00) == 0xFF00) {
			MVNI(size, Rd, ~elementValue & 0xFF, 0);
			return true;
		} else if ((elementValue & 0x00FF) == 0x00FF) {
			MVNI(size, Rd, (~elementValue >> 8) & 0xFF, 8);
			return true;
		}

		return false;
	} else if (size == 32) {
		for (int shift = 0; shift < 32; shift += 8) {
			uint32_t mask = 0xFFFFFFFF & ~(0xFF << shift);
			if ((elementValue & mask) == 0) {
				MOVI(size, Rd, (elementValue >> shift) & 0xFF, shift);
				return true;
			} else if ((elementValue & mask) == mask) {
				MVNI(size, Rd, (~elementValue >> shift) & 0xFF, shift);
				return true;
			}
		}

		// Maybe an MSL shift will work?
		for (int shift = 8; shift <= 16; shift += 8) {
			uint32_t mask = 0xFFFFFFFF & ~(0xFF << shift);
			uint32_t ones = (1 << shift) - 1;
			uint32_t notOnes = 0xFFFFFF00 << shift;
			if ((elementValue & mask) == ones) {
				MOVI(size, Rd, (elementValue >> shift) & 0xFF, shift, true);
				return true;
			} else if ((elementValue & mask) == notOnes) {
				// MVNI inverts the expanded immediate, so the field byte must be inverted too.
				MVNI(size, Rd, (~elementValue >> shift) & 0xFF, shift, true);
				return true;
			}
		}

		return false;
	} else if (size == 64) {
		uint8_t imm8 = 0;
		for (int i = 0; i < 8; ++i) {
			uint8_t byte = (elementValue >> (i * 8)) & 0xFF;
			if (byte != 0 && byte != 0xFF)
				return false;

			if (byte == 0xFF)
				imm8 |= 1 << i;
		}

		// Didn't run into any partial bytes, so size 64 is doable.
		MOVI(size, Rd, imm8);
		return true;
	}
	return false;
}

bool ARM64FloatEmitter::TryAnyMOVI(u8 size, ARM64Reg Rd, uint64_t elementValue) {
	// Try the original size first in case that's more optimal.
	if (TryMOVI(size, Rd, elementValue))
		return true;

	uint64_t value = elementValue;
	if (size != 64) {
		// Replicate the element so 'value' holds the full 64-bit pattern
		// (this also discards any bits above 'size', e.g. from sign extension.)
		uint64_t masked = elementValue & ((1ULL << size) - 1ULL);
		value = masked;
		for (int i = size; i < 64; i += size) {
			value |= masked << i;
		}
	}

	for (int attempt = 8; attempt <= 64; attempt += attempt) {
		// Original size was already attempted above.
		if (attempt == size)
			continue;
		if (attempt < size) {
			// A smaller element size only reproduces 'value' if the pattern is
			// also uniform at that smaller granularity.
			uint64_t piece = value & ((1ULL << attempt) - 1);
			uint64_t repeated = 0;
			for (int i = 0; i < 64; i += attempt)
				repeated |= piece << i;
			if (repeated != value)
				continue;
			if (TryMOVI(attempt, Rd, piece))
				return true;
		} else if (TryMOVI(attempt, Rd, value)) {
			return true;
		}
	}

	return false;
}
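
// For example, a 32-bit lane value of 0x00FF00FF fails every 32-bit MOVI/MVNI
// form above, but the replicated pattern is uniform at 16-bit granularity and
// encodes as a 16-bit MOVI of 0xFF.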

void ARM64XEmitter::SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) {
	u32 val;
	bool shift;
	if (IsImmArithmetic(imm, &val, &shift)) {
		SUBS(Rd, Rn, val, shift);
	} else {
		_assert_msg_(scratch != INVALID_REG, "SUBSI2R - failed to construct immediate value from %08x, need scratch", (u32)imm);
		MOVI2R(scratch, imm);
		SUBS(Rd, Rn, scratch);
	}
}

void ARM64CodeBlock::PoisonMemory(int offset) {
	// So we can adjust region to writable space. Might be zero.
	ptrdiff_t writable = m_writable - m_code;

	u32 *ptr = (u32 *)(region + offset + writable);
	u32 *maxptr = (u32 *)(region + region_size - offset + writable);
	// If our memory isn't a multiple of u32 then this won't write the last remaining bytes with anything.
	// Less than optimal, but there would be nothing we could do but throw a runtime warning anyway.
	// AArch64: 0xD4200000 = BRK 0
	while (ptr < maxptr)
		*ptr++ = 0xD4200000;
}
4339
4340
} // namespace
4341
4342