GitHub Repository: wine-mirror/wine
Path: blob/master/libs/capstone/arch/AArch64/AArch64AddressingModes.h
//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 addressing mode implementation stuff.
//
//===----------------------------------------------------------------------===//

#ifndef CS_AARCH64_ADDRESSINGMODES_H
#define CS_AARCH64_ADDRESSINGMODES_H

/* Capstone Disassembly Engine */
/* By Nguyen Anh Quynh <[email protected]>, 2013-2019 */

#include "../../MathExtras.h"

/// AArch64_AM - AArch64 Addressing Mode Stuff

//===----------------------------------------------------------------------===//
// Shifts
//
typedef enum AArch64_AM_ShiftExtendType {
    AArch64_AM_InvalidShiftExtend = -1,
    AArch64_AM_LSL = 0,
    AArch64_AM_LSR,
    AArch64_AM_ASR,
    AArch64_AM_ROR,
    AArch64_AM_MSL,

    AArch64_AM_UXTB,
    AArch64_AM_UXTH,
    AArch64_AM_UXTW,
    AArch64_AM_UXTX,

    AArch64_AM_SXTB,
    AArch64_AM_SXTH,
    AArch64_AM_SXTW,
    AArch64_AM_SXTX,
} AArch64_AM_ShiftExtendType;

/// getShiftExtendName - Get the string encoding for the shift type.
static inline const char *AArch64_AM_getShiftExtendName(AArch64_AM_ShiftExtendType ST)
{
    switch (ST) {
    default: return NULL; // never reached
    case AArch64_AM_LSL: return "lsl";
    case AArch64_AM_LSR: return "lsr";
    case AArch64_AM_ASR: return "asr";
    case AArch64_AM_ROR: return "ror";
    case AArch64_AM_MSL: return "msl";
    case AArch64_AM_UXTB: return "uxtb";
    case AArch64_AM_UXTH: return "uxth";
    case AArch64_AM_UXTW: return "uxtw";
    case AArch64_AM_UXTX: return "uxtx";
    case AArch64_AM_SXTB: return "sxtb";
    case AArch64_AM_SXTH: return "sxth";
    case AArch64_AM_SXTW: return "sxtw";
    case AArch64_AM_SXTX: return "sxtx";
    }
}

/// getShiftType - Extract the shift type.
static inline AArch64_AM_ShiftExtendType AArch64_AM_getShiftType(unsigned Imm)
{
    switch ((Imm >> 6) & 0x7) {
    default: return AArch64_AM_InvalidShiftExtend;
    case 0: return AArch64_AM_LSL;
    case 1: return AArch64_AM_LSR;
    case 2: return AArch64_AM_ASR;
    case 3: return AArch64_AM_ROR;
    case 4: return AArch64_AM_MSL;
    }
}

/// getShiftValue - Extract the shift value.
static inline unsigned AArch64_AM_getShiftValue(unsigned Imm)
{
    return Imm & 0x3f;
}

static inline unsigned AArch64_AM_getShifterImm(AArch64_AM_ShiftExtendType ST, unsigned Imm)
{
    // assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
    unsigned STEnc = 0;

    switch (ST) {
    default: // llvm_unreachable("Invalid shift requested");
    case AArch64_AM_LSL: STEnc = 0; break;
    case AArch64_AM_LSR: STEnc = 1; break;
    case AArch64_AM_ASR: STEnc = 2; break;
    case AArch64_AM_ROR: STEnc = 3; break;
    case AArch64_AM_MSL: STEnc = 4; break;
    }

    return (STEnc << 6) | (Imm & 0x3f);
}
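
// Illustrative example (not part of the original header): "lsl #12" encodes
// as AArch64_AM_getShifterImm(AArch64_AM_LSL, 12) == (0 << 6) | 12 == 0x0c,
// and "ror #3" as (3 << 6) | 3 == 0xc3; AArch64_AM_getShiftType() and
// AArch64_AM_getShiftValue() recover the type/amount pair from either value.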

//===----------------------------------------------------------------------===//
// Extends
//

/// getArithShiftValue - get the arithmetic shift value.
static inline unsigned AArch64_AM_getArithShiftValue(unsigned Imm)
{
    return Imm & 0x7;
}

/// getExtendType - Extract the extend type for operands of arithmetic ops.
static inline AArch64_AM_ShiftExtendType AArch64_AM_getExtendType(unsigned Imm)
{
    // assert((Imm & 0x7) == Imm && "invalid immediate!");
    switch (Imm) {
    default: // llvm_unreachable("Compiler bug!");
    case 0: return AArch64_AM_UXTB;
    case 1: return AArch64_AM_UXTH;
    case 2: return AArch64_AM_UXTW;
    case 3: return AArch64_AM_UXTX;
    case 4: return AArch64_AM_SXTB;
    case 5: return AArch64_AM_SXTH;
    case 6: return AArch64_AM_SXTW;
    case 7: return AArch64_AM_SXTX;
    }
}

static inline AArch64_AM_ShiftExtendType AArch64_AM_getArithExtendType(unsigned Imm)
{
    return AArch64_AM_getExtendType((Imm >> 3) & 0x7);
}

/// Mapping from extend bits to required operation:
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
static inline unsigned AArch64_AM_getExtendEncoding(AArch64_AM_ShiftExtendType ET)
{
    switch (ET) {
    default: // llvm_unreachable("Invalid extend type requested");
    case AArch64_AM_UXTB: return 0; break;
    case AArch64_AM_UXTH: return 1; break;
    case AArch64_AM_UXTW: return 2; break;
    case AArch64_AM_UXTX: return 3; break;
    case AArch64_AM_SXTB: return 4; break;
    case AArch64_AM_SXTH: return 5; break;
    case AArch64_AM_SXTW: return 6; break;
    case AArch64_AM_SXTX: return 7; break;
    }
}

/// getArithExtendImm - Encode the extend type and shift amount for an
/// arithmetic instruction:
///   imm: 3-bit extend amount
///   {5-3} = shifter
///   {2-0} = imm3
static inline unsigned AArch64_AM_getArithExtendImm(AArch64_AM_ShiftExtendType ET, unsigned Imm)
{
    // assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
    return (AArch64_AM_getExtendEncoding(ET) << 3) | (Imm & 0x7);
}
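
// Illustrative example (not from the original source): "uxtw #2" encodes as
// AArch64_AM_getArithExtendImm(AArch64_AM_UXTW, 2) == (2 << 3) | 2 == 0x12;
// AArch64_AM_getArithExtendType(0x12) == AArch64_AM_UXTW and
// AArch64_AM_getArithShiftValue(0x12) == 2 invert the encoding.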

/// getMemDoShift - Extract the "do shift" flag value for load/store
/// instructions.
static inline bool AArch64_AM_getMemDoShift(unsigned Imm)
{
    return (Imm & 0x1) != 0;
}

/// getExtendType - Extract the extend type for the offset operand of
/// loads/stores.
static inline AArch64_AM_ShiftExtendType AArch64_AM_getMemExtendType(unsigned Imm)
{
    return AArch64_AM_getExtendType((Imm >> 1) & 0x7);
}

static inline uint64_t ror(uint64_t elt, unsigned size)
{
    return ((elt & 1) << (size-1)) | (elt >> 1);
}

/// processLogicalImmediate - Determine if an immediate value can be encoded
/// as the immediate operand of a logical instruction for the given register
/// size. If so, return true with "encoding" set to the encoded value in
/// the form N:immr:imms.
static inline bool AArch64_AM_processLogicalImmediate(uint64_t Imm, unsigned RegSize, uint64_t *Encoding)
{
    unsigned Size, Immr, N;
    uint32_t CTO, I;
    uint64_t Mask, NImms;

    if (Imm == 0ULL || Imm == ~0ULL ||
        (RegSize != 64 && (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize))))) {
        return false;
    }

    // First, determine the element size.
    Size = RegSize;
    do {
        uint64_t Mask;

        Size /= 2;
        Mask = (1ULL << Size) - 1;
        if ((Imm & Mask) != ((Imm >> Size) & Mask)) {
            Size *= 2;
            break;
        }
    } while (Size > 2);

    // Second, determine the rotation to make the element be: 0^m 1^n.
    Mask = ((uint64_t)-1LL) >> (64 - Size);
    Imm &= Mask;

    if (isShiftedMask_64(Imm)) {
        I = CountTrailingZeros_32(Imm);
        // assert(I < 64 && "undefined behavior");
        CTO = CountTrailingOnes_32(Imm >> I);
    } else {
        unsigned CLO;

        Imm |= ~Mask;
        if (!isShiftedMask_64(~Imm))
            return false;

        CLO = CountLeadingOnes_32(Imm);
        I = 64 - CLO;
        CTO = CLO + CountTrailingOnes_32(Imm) - (64 - Size);
    }

    // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
    // to our target value, where I is the number of RORs to go the opposite
    // direction.
    // assert(Size > I && "I should be smaller than element size");
    Immr = (Size - I) & (Size - 1);

    // If size has a 1 in the n'th bit, create a value that has zeroes in
    // bits [0, n] and ones above that.
    NImms = ~(Size-1) << 1;

    // Or the CTO value into the low bits, which must be below the Nth bit
    // mentioned above.
    NImms |= (CTO-1);

    // Extract the seventh bit and toggle it to create the N field.
    N = ((NImms >> 6) & 1) ^ 1;

    *Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f);

    return true;
}
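
// Worked example (added for illustration, not in the upstream code): for
// Imm == 0x00ff00ff00ff00ffULL and RegSize == 64 the element size settles at
// 16 bits and the element is 0x00ff (eight trailing ones, no rotation), so
// Immr == 0, the low six bits of NImms are 0b100111 and N == 0, giving
// *Encoding == 0x027, i.e. N:immr:imms == 0:000000:100111.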

/// isLogicalImmediate - Return true if the immediate is valid for a logical
/// immediate instruction of the given register size. Return false otherwise.
static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize)
{
    uint64_t encoding;
    return AArch64_AM_processLogicalImmediate(imm, regSize, &encoding);
}

/// encodeLogicalImmediate - Return the encoded immediate value for a logical
/// immediate instruction of the given register size.
static inline uint64_t AArch64_AM_encodeLogicalImmediate(uint64_t imm, unsigned regSize)
{
    uint64_t encoding = 0;

    bool res = AArch64_AM_processLogicalImmediate(imm, regSize, &encoding);
    // assert(res && "invalid logical immediate");
    (void)res;

    return encoding;
}

/// decodeLogicalImmediate - Decode a logical immediate value in the form
/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
/// integer value it represents with regSize bits.
static inline uint64_t AArch64_AM_decodeLogicalImmediate(uint64_t val, unsigned regSize)
{
    // Extract the N, imms, and immr fields.
    unsigned N = (val >> 12) & 1;
    unsigned immr = (val >> 6) & 0x3f;
    unsigned imms = val & 0x3f;
    unsigned i, size, R, S;
    uint64_t pattern;

    // assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
    int len = 31 - CountLeadingZeros_32((N << 6) | (~imms & 0x3f));

    // assert(len >= 0 && "undefined logical immediate encoding");
    size = (1 << len);
    R = immr & (size - 1);
    S = imms & (size - 1);

    // assert(S != size - 1 && "undefined logical immediate encoding");
    pattern = (1ULL << (S + 1)) - 1;

    for (i = 0; i < R; ++i)
        pattern = ror(pattern, size);

    // Replicate the pattern to fill the regSize.
    while (size != regSize) {
        pattern |= (pattern << size);
        size *= 2;
    }

    return pattern;
}
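
// Round-trip example (illustrative, not part of the original header):
// AArch64_AM_decodeLogicalImmediate(0x027, 64) rebuilds the 16-bit element
// 0x00ff (len == 4, size == 16, S == 7, R == 0) and replicates it to
// 0x00ff00ff00ff00ffULL, inverting the encoding shown above.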

/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
/// is a valid encoding for an integer value with regSize bits.
static inline bool AArch64_AM_isValidDecodeLogicalImmediate(uint64_t val, unsigned regSize)
{
    unsigned size, S;
    int len;
    // Extract the N and imms fields needed for checking.
    unsigned N = (val >> 12) & 1;
    unsigned imms = val & 0x3f;

    if (regSize == 32 && N != 0) // undefined logical immediate encoding
        return false;
    len = 31 - CountLeadingZeros_32((N << 6) | (~imms & 0x3f));
    if (len < 0) // undefined logical immediate encoding
        return false;
    size = (1 << len);
    S = imms & (size - 1);
    if (S == size - 1) // undefined logical immediate encoding
        return false;

    return true;
}

//===----------------------------------------------------------------------===//
// Floating-point Immediates
//
static inline float AArch64_AM_getFPImmFloat(unsigned Imm)
{
    // We expect an 8-bit binary encoding of a floating-point number here.
    union {
        uint32_t I;
        float F;
    } FPUnion;

    uint8_t Sign = (Imm >> 7) & 0x1;
    uint8_t Exp = (Imm >> 4) & 0x7;
    uint8_t Mantissa = Imm & 0xf;

    //   8-bit FP IEEE Float Encoding
    //   abcd efgh aBbbbbbc defgh000 00000000 00000000
    //
    // where B = NOT(b);

    FPUnion.I = 0;
    FPUnion.I |= ((uint32_t)Sign) << 31;
    FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
    FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
    FPUnion.I |= (Exp & 0x3) << 23;
    FPUnion.I |= Mantissa << 19;

    return FPUnion.F;
}
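
// Illustrative values (not from the original source): Imm == 0x70 (sign 0,
// exp 0b111, mantissa 0) expands to 0x3f800000, i.e. 1.0f, and Imm == 0x00
// expands to 0x40000000, i.e. 2.0f, matching the AArch64 FMOV 8-bit
// immediate encoding.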

//===--------------------------------------------------------------------===//
// AdvSIMD Modified Immediates
//===--------------------------------------------------------------------===//

// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
static inline bool AArch64_AM_isAdvSIMDModImmType1(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm & 0xffffff00ffffff00ULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType1(uint64_t Imm)
{
    return (Imm & 0xffULL);
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType1(uint8_t Imm)
{
    uint64_t EncVal = Imm;

    return (EncVal << 32) | EncVal;
}
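
// Example (illustrative, not in the upstream code): 0x000000ab000000abULL
// satisfies isAdvSIMDModImmType1, encodes to 0xab, and
// AArch64_AM_decodeAdvSIMDModImmType1(0xab) restores the original value.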

// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
static inline bool AArch64_AM_isAdvSIMDModImmType2(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm & 0xffff00ffffff00ffULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType2(uint64_t Imm)
{
    return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType2(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 40) | (EncVal << 8);
}

// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
static inline bool AArch64_AM_isAdvSIMDModImmType3(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm & 0xff00ffffff00ffffULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType3(uint64_t Imm)
{
    return (Imm & 0xff0000ULL) >> 16;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType3(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 48) | (EncVal << 16);
}

// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
static inline bool AArch64_AM_isAdvSIMDModImmType4(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm & 0x00ffffff00ffffffULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType4(uint64_t Imm)
{
    return (Imm & 0xff000000ULL) >> 24;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType4(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 56) | (EncVal << 24);
}

// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
static inline bool AArch64_AM_isAdvSIMDModImmType5(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
        ((Imm & 0xff00ff00ff00ff00ULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType5(uint64_t Imm)
{
    return (Imm & 0xffULL);
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType5(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
}

// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
static inline bool AArch64_AM_isAdvSIMDModImmType6(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
        ((Imm & 0x00ff00ff00ff00ffULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType6(uint64_t Imm)
{
    return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType6(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
}

// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
static inline bool AArch64_AM_isAdvSIMDModImmType7(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType7(uint64_t Imm)
{
    return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType7(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
}

// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
static inline bool AArch64_AM_isAdvSIMDModImmType8(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType8(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType8(uint64_t Imm)
{
    return (Imm & 0x00ff0000ULL) >> 16;
}

// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
static inline bool AArch64_AM_isAdvSIMDModImmType9(uint64_t Imm)
{
    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
        ((Imm >> 56) == (Imm & 0x000000ffULL));
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType9(uint64_t Imm)
{
    return (Imm & 0xffULL);
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType9(uint8_t Imm)
{
    uint64_t EncVal = Imm;
    EncVal |= (EncVal << 8);
    EncVal |= (EncVal << 16);
    EncVal |= (EncVal << 32);

    return EncVal;
}

// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool AArch64_AM_isAdvSIMDModImmType10(uint64_t Imm)
{
    uint64_t ByteA = Imm & 0xff00000000000000ULL;
    uint64_t ByteB = Imm & 0x00ff000000000000ULL;
    uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
    uint64_t ByteD = Imm & 0x000000ff00000000ULL;
    uint64_t ByteE = Imm & 0x00000000ff000000ULL;
    uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
    uint64_t ByteG = Imm & 0x000000000000ff00ULL;
    uint64_t ByteH = Imm & 0x00000000000000ffULL;

    return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
        (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
        (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
        (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
        (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
        (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
        (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
        (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType10(uint64_t Imm)
{
    uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
    uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
    uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
    uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
    uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
    uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
    uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
    uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;

    uint8_t EncVal = BitA;

    EncVal <<= 1;
    EncVal |= BitB;
    EncVal <<= 1;
    EncVal |= BitC;
    EncVal <<= 1;
    EncVal |= BitD;
    EncVal <<= 1;
    EncVal |= BitE;
    EncVal <<= 1;
    EncVal |= BitF;
    EncVal <<= 1;
    EncVal |= BitG;
    EncVal <<= 1;
    EncVal |= BitH;

    return EncVal;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType10(uint8_t Imm)
{
    uint64_t EncVal = 0;

    if (Imm & 0x80)
        EncVal |= 0xff00000000000000ULL;

    if (Imm & 0x40)
        EncVal |= 0x00ff000000000000ULL;

    if (Imm & 0x20)
        EncVal |= 0x0000ff0000000000ULL;

    if (Imm & 0x10)
        EncVal |= 0x000000ff00000000ULL;

    if (Imm & 0x08)
        EncVal |= 0x00000000ff000000ULL;

    if (Imm & 0x04)
        EncVal |= 0x0000000000ff0000ULL;

    if (Imm & 0x02)
        EncVal |= 0x000000000000ff00ULL;

    if (Imm & 0x01)
        EncVal |= 0x00000000000000ffULL;

    return EncVal;
}
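
// Example (illustrative, not in the upstream code): Type 10 maps each
// all-zero or all-one byte to one bit, so 0xff00ff00ff00ff00ULL encodes to
// 0b10101010 == 0xaa, and decodeAdvSIMDModImmType10(0xaa) expands it back.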

// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
static inline bool AArch64_AM_isAdvSIMDModImmType11(uint64_t Imm)
{
    uint64_t BString = (Imm & 0x7E000000ULL) >> 25;

    return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
        (BString == 0x1f || BString == 0x20) &&
        ((Imm & 0x0007ffff0007ffffULL) == 0);
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType11(uint64_t Imm)
{
    uint8_t BitA = (Imm & 0x80000000ULL) != 0;
    uint8_t BitB = (Imm & 0x20000000ULL) != 0;
    uint8_t BitC = (Imm & 0x01000000ULL) != 0;
    uint8_t BitD = (Imm & 0x00800000ULL) != 0;
    uint8_t BitE = (Imm & 0x00400000ULL) != 0;
    uint8_t BitF = (Imm & 0x00200000ULL) != 0;
    uint8_t BitG = (Imm & 0x00100000ULL) != 0;
    uint8_t BitH = (Imm & 0x00080000ULL) != 0;

    uint8_t EncVal = BitA;
    EncVal <<= 1;
    EncVal |= BitB;
    EncVal <<= 1;
    EncVal |= BitC;
    EncVal <<= 1;
    EncVal |= BitD;
    EncVal <<= 1;
    EncVal |= BitE;
    EncVal <<= 1;
    EncVal |= BitF;
    EncVal <<= 1;
    EncVal |= BitG;
    EncVal <<= 1;
    EncVal |= BitH;

    return EncVal;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType11(uint8_t Imm)
{
    uint64_t EncVal = 0;

    if (Imm & 0x80)
        EncVal |= 0x80000000ULL;

    if (Imm & 0x40)
        EncVal |= 0x3e000000ULL;
    else
        EncVal |= 0x40000000ULL;

    if (Imm & 0x20)
        EncVal |= 0x01000000ULL;

    if (Imm & 0x10)
        EncVal |= 0x00800000ULL;

    if (Imm & 0x08)
        EncVal |= 0x00400000ULL;

    if (Imm & 0x04)
        EncVal |= 0x00200000ULL;

    if (Imm & 0x02)
        EncVal |= 0x00100000ULL;

    if (Imm & 0x01)
        EncVal |= 0x00080000ULL;

    return (EncVal << 32) | EncVal;
}
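
// Example (illustrative, not in the upstream code): Type 11 is the 32-bit
// FMOV immediate pattern; 1.0f == 0x3f800000, replicated to
// 0x3f8000003f800000ULL, encodes to 0x70, and decodeAdvSIMDModImmType11(0x70)
// reproduces the replicated value.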

// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
static inline bool AArch64_AM_isAdvSIMDModImmType12(uint64_t Imm)
{
    uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
    return ((BString == 0xff || BString == 0x100) &&
        ((Imm & 0x0000ffffffffffffULL) == 0));
}

static inline uint8_t AArch64_AM_encodeAdvSIMDModImmType12(uint64_t Imm)
{
    uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
    uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
    uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
    uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
    uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
    uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
    uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
    uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;

    uint8_t EncVal = BitA;
    EncVal <<= 1;
    EncVal |= BitB;
    EncVal <<= 1;
    EncVal |= BitC;
    EncVal <<= 1;
    EncVal |= BitD;
    EncVal <<= 1;
    EncVal |= BitE;
    EncVal <<= 1;
    EncVal |= BitF;
    EncVal <<= 1;
    EncVal |= BitG;
    EncVal <<= 1;
    EncVal |= BitH;

    return EncVal;
}

static inline uint64_t AArch64_AM_decodeAdvSIMDModImmType12(uint8_t Imm)
{
    uint64_t EncVal = 0;
    if (Imm & 0x80)
        EncVal |= 0x8000000000000000ULL;

    if (Imm & 0x40)
        EncVal |= 0x3fc0000000000000ULL;
    else
        EncVal |= 0x4000000000000000ULL;

    if (Imm & 0x20)
        EncVal |= 0x0020000000000000ULL;

    if (Imm & 0x10)
        EncVal |= 0x0010000000000000ULL;

    if (Imm & 0x08)
        EncVal |= 0x0008000000000000ULL;

    if (Imm & 0x04)
        EncVal |= 0x0004000000000000ULL;

    if (Imm & 0x02)
        EncVal |= 0x0002000000000000ULL;

    if (Imm & 0x01)
        EncVal |= 0x0001000000000000ULL;

    return (EncVal << 32) | EncVal;
}
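
// Example (illustrative, not in the upstream code): Type 12 is the 64-bit
// FMOV immediate pattern; 1.0 == 0x3ff0000000000000ULL encodes to 0x70 and
// decodeAdvSIMDModImmType12(0x70) rebuilds the same bit pattern.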

/// Returns true if Imm is the concatenation of a repeating pattern of the
/// given element type.
static inline bool AArch64_AM_isSVEMaskOfIdenticalElements8(int64_t Imm)
{
#define _VECSIZE (sizeof(int64_t)/sizeof(int8_t))
    unsigned int i;
    union {
        int64_t Whole;
        int8_t Parts[_VECSIZE];
    } Vec;

    Vec.Whole = Imm;

    for(i = 1; i < _VECSIZE; i++) {
        if (Vec.Parts[i] != Vec.Parts[0])
            return false;
    }
#undef _VECSIZE

    return true;
}
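
// Example (illustrative, not in the upstream code): 0x7f7f7f7f7f7f7f7fLL is
// a mask of identical 8-bit elements, while 0x1234123412341234LL fails the
// 8-bit check but passes AArch64_AM_isSVEMaskOfIdenticalElements16().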

static inline bool AArch64_AM_isSVEMaskOfIdenticalElements16(int64_t Imm)
{
#define _VECSIZE (sizeof(int64_t)/sizeof(int16_t))
    unsigned int i;
    union {
        int64_t Whole;
        int16_t Parts[_VECSIZE];
    } Vec;

    Vec.Whole = Imm;

    for(i = 1; i < _VECSIZE; i++) {
        if (Vec.Parts[i] != Vec.Parts[0])
            return false;
    }
#undef _VECSIZE

    return true;
}

static inline bool AArch64_AM_isSVEMaskOfIdenticalElements32(int64_t Imm)
{
#define _VECSIZE (sizeof(int64_t)/sizeof(int32_t))
    unsigned int i;
    union {
        int64_t Whole;
        int32_t Parts[_VECSIZE];
    } Vec;

    Vec.Whole = Imm;

    for(i = 1; i < _VECSIZE; i++) {
        if (Vec.Parts[i] != Vec.Parts[0])
            return false;
    }
#undef _VECSIZE

    return true;
}

static inline bool AArch64_AM_isSVEMaskOfIdenticalElements64(int64_t Imm)
{
    return true;
}

static inline bool isSVECpyImm8(int64_t Imm)
{
    bool IsImm8 = (int8_t)Imm == Imm;

    return IsImm8 || (uint8_t)Imm == Imm;
}

static inline bool isSVECpyImm16(int64_t Imm)
{
    bool IsImm8 = (int8_t)Imm == Imm;
    bool IsImm16 = (int16_t)(Imm & ~0xff) == Imm;

    return IsImm8 || IsImm16 || (uint16_t)(Imm & ~0xff) == Imm;
}

static inline bool isSVECpyImm32(int64_t Imm)
{
    bool IsImm8 = (int8_t)Imm == Imm;
    bool IsImm16 = (int16_t)(Imm & ~0xff) == Imm;

    return IsImm8 || IsImm16;
}

static inline bool isSVECpyImm64(int64_t Imm)
{
    bool IsImm8 = (int8_t)Imm == Imm;
    bool IsImm16 = (int16_t)(Imm & ~0xff) == Imm;

    return IsImm8 || IsImm16;
}

/// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
static inline bool AArch64_AM_isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm)
{
    union {
        int64_t D;
        int32_t S[2];
        int16_t H[4];
        int8_t B[8];
    } Vec = {Imm};

    if (isSVECpyImm64(Vec.D))
        return false;

    if (AArch64_AM_isSVEMaskOfIdenticalElements32(Imm) &&
            isSVECpyImm32(Vec.S[0]))
        return false;

    if (AArch64_AM_isSVEMaskOfIdenticalElements16(Imm) &&
            isSVECpyImm16(Vec.H[0]))
        return false;

    if (AArch64_AM_isSVEMaskOfIdenticalElements8(Imm) &&
            isSVECpyImm8(Vec.B[0]))
        return false;

    return isLogicalImmediate(Vec.D, 64);
}
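
// Example (illustrative, not in the upstream code): 0x0101010101010101LL can
// be materialized by CPY/DUP of the byte 0x01, so the function returns false;
// 0x00ff00ff00ff00ffLL has no single CPY/DUP form but is a valid 64-bit
// logical immediate, so DUPM is preferred and the function returns true.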

inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth)
{
    int Shift;

    for (Shift = 0; Shift <= RegWidth - 16; Shift += 16)
        if ((Value & ~(0xffffULL << Shift)) == 0)
            return true;

    return false;
}

inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
{
    if (RegWidth == 32)
        Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
        return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
}
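
// Example (illustrative, not in the upstream code): 0x12340000 with Shift 16
// and RegWidth 32 is a MOVZ alias ("movz w0, #0x1234, lsl #16"), since
// clearing the shifted 16-bit field leaves zero; the same value with Shift 0
// is not.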

inline static bool AArch64_AM_isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
{
    // MOVZ takes precedence over MOVN.
    if (isAnyMOVZMovAlias(Value, RegWidth))
        return false;

    Value = ~Value;
    if (RegWidth == 32)
        Value &= 0xffffffffULL;

    return isMOVZMovAlias(Value, Shift, RegWidth);
}
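
// Example (illustrative, not in the upstream code): 0xffffffffffff1234ULL is
// not a MOVZ alias for RegWidth 64, but its complement 0x000000000000edcbULL
// fits one 16-bit field at Shift 0, so AArch64_AM_isMOVNMovAlias returns true
// and the value can be materialized with a single MOVN.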

inline static bool AArch64_AM_isAnyMOVWMovAlias(uint64_t Value, int RegWidth)
{
    if (isAnyMOVZMovAlias(Value, RegWidth))
        return true;

    // It's not a MOVZ, but it might be a MOVN.
    Value = ~Value;
    if (RegWidth == 32)
        Value &= 0xffffffffULL;

    return isAnyMOVZMovAlias(Value, RegWidth);
}

#endif