Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/assembler_aarch32.hpp
32285 views
1
/*
2
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
4
* Copyright (c) 2015, Linaro Ltd. All rights reserved.
5
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6
*
7
* This code is free software; you can redistribute it and/or modify it
8
* under the terms of the GNU General Public License version 2 only, as
9
* published by the Free Software Foundation.
10
*
11
* This code is distributed in the hope that it will be useful, but WITHOUT
12
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14
* version 2 for more details (a copy is included in the LICENSE file that
15
* accompanied this code).
16
*
17
* You should have received a copy of the GNU General Public License version
18
* 2 along with this work; if not, write to the Free Software Foundation,
19
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20
*
21
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22
* or visit www.oracle.com if you need additional information or have any
23
* questions.
24
*
25
*/
26
27
#ifndef CPU_AARCH32_VM_ASSEMBLER_AARCH32_HPP
28
#define CPU_AARCH32_VM_ASSEMBLER_AARCH32_HPP
29
30
#include "asm/register.hpp"
31
#include "vm_version_aarch32.hpp"
32
33
// Definitions of various symbolic names for machine registers
34
35
// Here we define how many integer and double precision floating point
36
// registers are used for passing parameters by the C and Java calling
37
// conventions. Each double precision floating point register can be used
38
// as two single precision registers.
39
40
// Counts of integer and floating-point registers used to pass parameters
// under the C (AAPCS) and Java calling conventions on AArch32.  With the
// hard-float ABI (HARD_FLOAT_CC) eight FP registers carry arguments; with
// the soft-float ABI FP arguments travel in core registers, so the FP
// register counts are zero.
class Argument VALUE_OBJ_CLASS_SPEC {
 public:
  enum {
    n_int_register_parameters_c = 4,    // c_rarg0, c_rarg1, c_rarg2, c_rarg3
#ifdef HARD_FLOAT_CC
    n_float_register_parameters_c = 8,  // c_farg0, c_farg1, ..., c_farg7
#else // HARD_FLOAT_CC
    n_float_register_parameters_c = 0,  // 0 registers used to pass arguments
#endif // HARD_FLOAT_CC
    n_int_register_parameters_j = 4,    // j_rarg0, j_rarg1, j_rarg2, j_rarg3
#ifdef HARD_FLOAT_CC
    n_float_register_parameters_j = 8   // j_farg0, j_farg1, ..., j_farg7
#else // HARD_FLOAT_CC
    n_float_register_parameters_j = 0   // 0 registers used to pass arguments
#endif // HARD_FLOAT_CC
  };
};
57
58
// Symbolic names for the register arguments used by the C calling convention
// (the calling convention for C runtime calls and calls to JNI native
// methods)

REGISTER_DECLARATION(Register, c_rarg0, r0);
REGISTER_DECLARATION(Register, c_rarg1, r1);
REGISTER_DECLARATION(Register, c_rarg2, r2);
REGISTER_DECLARATION(Register, c_rarg3, r3);

// Symbolic names for the register arguments used by the Java calling
// convention (the calling convention for calls to compiled Java methods)

// We have control over the convention for Java so we can do what we please.
// What pleases us is to offset the Java calling convention so that when
// we call a suitable JNI method the arguments are lined up and we don't
// have to do much shuffling. A suitable JNI method is non-static and with
// a small number of arguments.
//
// |-----------------------------------|
// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 |
// |-----------------------------------|
// | r0 r1 r2 r3 |
// |-----------------------------------|
// | j_rarg3 j_rarg0 j_rarg1 j_rarg2 |
// |-----------------------------------|

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
REGISTER_DECLARATION(Register, j_rarg3, c_rarg0);

// Common register aliases used in assembler code

// These registers are used to hold VM data either temporarily within a method
// or across method calls. According to AAPCS, r0-r3 and r12 are caller-saved,
// the rest are callee-saved.

// These 4 aliases are used in the template interpreter only.

REGISTER_DECLARATION(Register, rdispatch, r4); // Address of dispatch table
REGISTER_DECLARATION(Register, rbcp, r5); // Bytecode pointer
REGISTER_DECLARATION(Register, rlocals, r6); // Address of local variables section of current frame
REGISTER_DECLARATION(Register, rcpool, r7); // Address of constant pool cache

// The following aliases are used in all VM components.

REGISTER_DECLARATION(Register, rthread, r8); // Address of current thread
REGISTER_DECLARATION(Register, rscratch1, r9); // Scratch register
REGISTER_DECLARATION(Register, rmethod, r10); // Address of current method
REGISTER_DECLARATION(Register, rfp, r11); // Frame pointer
REGISTER_DECLARATION(Register, rscratch2, r12); // Scratch register
REGISTER_DECLARATION(Register, sp, r13); // Stack pointer
REGISTER_DECLARATION(Register, lr, r14); // Link register
REGISTER_DECLARATION(Register, r15_pc, r15); // Program counter

// Entry point for the assembler self-test (defined in the .cpp file).
extern "C" void entry(CodeBuffer *cb);

// Wraps HotSpot's two-argument assert, using the condition text itself as
// the failure message.
#define assert_cond(ARG1) assert(ARG1, #ARG1)

class Assembler;
122
// Accumulates the bit pattern of a single 32-bit AArch32 instruction.
// Fields are OR-ed in with f()/sf()/rf()/fixed(); in ASSERT builds the
// 'bits' mask records which bit positions have been written so the
// assembler can verify that every bit of the word was set exactly once
// before emission.
class Instruction_aarch32 {
  unsigned insn;        // the instruction word under construction
#ifdef ASSERT
  unsigned bits;        // mask of bit positions already written (debug only)
#endif
  Assembler *assem;     // owning assembler, used by the destructor to emit

 public:

  Instruction_aarch32(class Assembler *as) {
#ifdef ASSERT
    bits = 0;
#endif
    insn = 0;
    assem = as;
  }

  // Defined after class Assembler; emits the finished word.
  inline ~Instruction_aarch32();

  unsigned &get_insn() { return insn; }
#ifdef ASSERT
  unsigned &get_bits() { return bits; }
#endif

  // Sign-extend the bit-field val[hi:lo] to a full 32-bit integer.
  // The anonymous union is HotSpot's idiom for reinterpreting the word as
  // signed so that the right-shift is arithmetic.
  static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) {
    union {
      unsigned u;
      int n;
    };

    u = val << (31 - hi);
    n = n >> (31 - hi + lo);
    return n;
  }

  // Zero-extended extraction of val[msb:lsb].
  static inline uint32_t extract(uint32_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    assert_cond(msb >= lsb);
    uint32_t mask = (1U << nbits) - 1;
    uint32_t result = val >> lsb;
    result &= mask;
    return result;
  }

  // Sign-extended extraction of val[msb:lsb].
  static inline int32_t sextract(uint32_t val, int msb, int lsb) {
    uint32_t uval = extract(val, msb, lsb);
    return extend(uval, msb - lsb);
  }

  // Patch bits [msb:lsb] of the already-emitted instruction at 'a' with
  // the unsigned value 'val'.
  static void patch(address a, int msb, int lsb, unsigned long val) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= val;
    *(unsigned *)a = target;
  }

  // Signed variant of patch(): 'val' must fit in nbits as a two's
  // complement value (the shifted-out high bits must be all 0s or all 1s).
  static void spatch(address a, int msb, int lsb, long val) {
    int nbits = msb - lsb + 1;
    long chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
    uval &= mask;
    uval <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= uval;
    *(unsigned *)a = target;
  }

  /* void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    insn |= val;
    assert_cond((bits & mask) == 0);
#ifdef ASSERT
    bits |= mask;
#endif
  }*/

  // Set field insn[msb:lsb] = val (unsigned).  Unlike the commented-out
  // variant above, this clears the field first, so re-writing a field is
  // permitted; the debug 'bits' mask still records coverage.
  void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    insn &= ~mask;
    insn |= val;
#ifdef ASSERT
    bits |= mask;
#endif
  }

  // Single-bit convenience form of f().
  void f(unsigned val, int bit) {
    f(val, bit, bit);
  }

  // Signed field write: 'val' must fit in (msb-lsb+1) bits as a two's
  // complement value.
  void sf(long val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    long chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
    uval &= mask;
    f(uval, lsb + nbits - 1, lsb);
  }

  // Place a core register number in the 4-bit field starting at lsb.
  void rf(Register r, int lsb) {
    f(r->encoding_nocheck(), lsb + 3, lsb);
  }

  // Place a floating-point register number in the 5-bit field at lsb.
  void rf(FloatRegister r, int lsb) {
    f(r->encoding_nocheck(), lsb + 4, lsb);
  }

  // Read back field insn[msb:lsb]; in debug builds the field must already
  // have been written.
  unsigned get(int msb = 31, int lsb = 0) {
    int nbits = msb - lsb + 1;
    unsigned mask = ((1U << nbits) - 1) << lsb;
    assert_cond((bits & mask) == mask);
    return (insn & mask) >> lsb;
  }

  // OR in a fixed (opcode) bit pattern; 'mask' covers the bits claimed,
  // which must not have been written before.
  void fixed(unsigned value, unsigned mask) {
    assert_cond ((mask & bits) == 0);
#ifdef ASSERT
    bits |= mask;
#endif
    insn |= value;
  }
};
264
265
// Start assembling one instruction: creates a scoped Instruction_aarch32
// whose destructor emits the finished word.
#define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use)

// Absolute value of a long, widened through the jlong overload.
// NOTE(review): relies on an overload uabs(jlong) declared elsewhere;
// if jlong were the same type as long this would recurse — confirm the
// 64-bit overload is visible at this point.
static inline unsigned long uabs(long n) { return uabs((jlong)n); }

#define S_DFLT ::lsl()   // default shift: LSL #0
#define C_DFLT AL        // default condition: always
271
272
273
// Shift for base reg + reg offset addressing
274
class shift_op {
275
public:
276
enum shift_kind { LSL, LSR, ASR, ROR };
277
private:
278
enum shift_source { imm_s, reg_s };
279
enum shift_source _source;
280
enum shift_kind _op;
281
int _shift;
282
Register _reg;
283
284
bool check_valid() {
285
if(imm_s == _source) {
286
switch(_op) {
287
case LSL: return _shift >= 0 && _shift <= 31;
288
case ROR: return _shift >= 1 && _shift <= 32;
289
default: return _shift >= 1 && _shift <= 32;
290
}
291
}
292
return true; //Don't check register shifts
293
}
294
public:
295
// Default shift is lsl(0)
296
shift_op()
297
: _source(imm_s), _op(LSL), _shift(0) { }
298
shift_op(enum shift_kind op, int shift)
299
: _source(imm_s), _op(op), _shift(shift) {
300
if(!shift) {
301
// All zero shift encodings map to LSL 0
302
_shift = 0;
303
_op = LSL;
304
}
305
int pshift = _shift;
306
if(-1 == _shift && ROR == _op) {
307
// This is an RRX, make shift valid for the check
308
_shift = 1;
309
pshift = 0; //set to zero
310
}
311
assert(check_valid(), "Invalid shift quantity");
312
_shift = pshift; //restore shift
313
}
314
shift_op(enum shift_kind op, Register r)
315
: _source(reg_s), _op(op), _reg(r) {}
316
317
shift_kind kind() const {
318
return _op;
319
}
320
321
int shift() const {
322
assert(imm_s == _source, "Not an immediate shift");
323
return _shift % 32;
324
}
325
Register reg() const {
326
assert(reg_s == _source, "Not a register shift");
327
return _reg;
328
}
329
bool is_register() {
330
return reg_s == _source;
331
}
332
bool operator==(const shift_op& other) const {
333
if(imm_s == _source && imm_s == other._source) {
334
return _op == other._op && _shift == other._shift;
335
} else if (reg_s == _source && imm_s == _source) {
336
return _op == other._op && _reg == other._reg;
337
}
338
return false;
339
}
340
bool operator!=(const shift_op& other) const {
341
return !( *this == other);
342
}
343
};
344
// Convenience constructors so code can write lsl(3), asr(r2), rrx(), etc.
// Logical shift left by an immediate or by a register amount.
class lsl : public shift_op {
 public:
  lsl(int sft = 0): shift_op(LSL, sft) { }
  lsl(Register r): shift_op(LSL, r) { }
};
// Logical shift right.
class lsr : public shift_op {
 public:
  lsr(int sft = 0): shift_op(LSR, sft) { }
  lsr(Register r): shift_op(LSR, r) { }
};
// Arithmetic shift right.
class asr : public shift_op {
 public:
  asr(int sft = 0): shift_op(ASR, sft) { }
  asr(Register r): shift_op(ASR, r) { }
};
// Rotate right.
class ror : public shift_op {
 public:
  ror(int sft = 0): shift_op(ROR, sft) {}
  ror(Register r): shift_op(ROR, r) { }
};
// Rotate right with extend (one-bit rotate through the carry flag);
// encoded as ROR #0, represented by the special amount -1.
class rrx : public shift_op {
 public:
  rrx(): shift_op(ROR, -1) {}
};
368
369
370
// Addressing modes
//
// Describes one memory operand: base+immediate (imm), base+index register
// with optional shift (reg), or a PC-relative literal (lit), each with
// optional pre/post write-back.
class Address VALUE_OBJ_CLASS_SPEC {
 public:
  enum access_mode { no_mode, imm, reg, lit };
  //literal is class of imm? -> potentially have to split later if some instructions work
  // with one but not other although can be determined from registers.
  enum wb_mode { off, pre, post };  // no write-back / pre-indexed / post-indexed

  enum reg_op { ADD, SUB };  // whether the index is added or subtracted

 private:
  Register _base;
  Register _index;
  int _offset;
  enum access_mode _acc_mode;
  enum wb_mode _wb_mode;
  enum reg_op _as_op;
  shift_op _shift;

  RelocationHolder _rspec;

  // Typically we use AddressLiterals we want to use their rval
  // However in some situations we want the lval (effect address) of
  // the item. We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to a
  // register to reach it. Otherwise if near we can do PC-relative
  // addressing.
  address _target;

 public:
  Address()
    : _acc_mode(no_mode) { }
  //immediate & literal
  Address(Register r, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(0), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, int o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, long o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, unsigned long o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, unsigned int o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
#ifdef ASSERT
  Address(Register r, ByteSize disp)
    : _base(r), _index(noreg), _offset(in_bytes(disp)), _acc_mode(imm), _wb_mode(off),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
#endif


  //Register-offset
  Address(Register r, Register r1, shift_op shift = lsl(), enum reg_op op = ADD,
          enum wb_mode wbm = off)
    : _base(r), _index(r1), _offset(0), _acc_mode(reg), _wb_mode(wbm), _as_op(op),
      _shift(shift), _target(0) {
    assert(!shift.is_register(), "Can't shift a register-offset address by a register");
  }

  // PC-relative literal.  NOTE(review): initializer list order differs
  // from declaration order (members are still initialized in declaration
  // order), and _index/_offset are left uninitialized in this mode.
  Address(address target, RelocationHolder const& rspec)
    : _acc_mode(lit),
      _base(sp),
      _wb_mode(off),
      _rspec(rspec),
      _is_lval(false),
      _target(target)
    { }
  Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);

 private:
  //Could be either
  void AddressConstruct(Register base, RegisterOrConstant index, enum reg_op op, shift_op shift,
                        enum wb_mode mode);
 public:

  Address(Register base, RegisterOrConstant index, enum reg_op op, enum wb_mode mode) {
    AddressConstruct(base, index, op, lsl(), mode);
  }
  Address(Register base, RegisterOrConstant index, shift_op shift = lsl(), enum reg_op op = ADD,
          enum wb_mode mode = off) {
    if(shift.kind() != lsl().kind()) {
      assert(index.is_register(), "should be");
    }
    AddressConstruct(base, index, op, shift, mode);
  }


  Register base() const {
    //in aarch64 this didn't apply to preindex mode -> why?
    guarantee(_acc_mode == imm || _acc_mode == reg, "wrong mode");
    return _base;
  }
  long offset() const {
    return _offset;
  }
  Register index() const {
    return _index;
  }
  shift_op shift() const {
    return _shift;
  }
  reg_op op() const {
    return _as_op;
  }
  access_mode get_mode() const {
    return _acc_mode;
  }
  wb_mode get_wb_mode() const {
    return _wb_mode;
  }
  bool uses(Register reg) const { return _base == reg || _index == reg; }
  address target() const { return _target; }
  const RelocationHolder& rspec() const { return _rspec; }

  // Write this operand's fields into a partially-built instruction.
  void encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const;

  // Same, for the FP/VFP load/store encodings.
  void fp_encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const;

  void lea(MacroAssembler *, Register) const;

  // Data type an instruction accesses through this address; mirrors
  // BasicType plus two pseudo-types used only for validity checks.
  typedef enum {
    IDT_BOOLEAN = T_BOOLEAN,
    IDT_CHAR = T_CHAR,
    IDT_FLOAT = T_FLOAT,
    IDT_DOUBLE = T_DOUBLE,
    IDT_BYTE = T_BYTE,
    IDT_SHORT = T_SHORT,
    IDT_INT = T_INT,
    IDT_LONG = T_LONG,
    IDT_OBJECT = T_OBJECT,
    IDT_ARRAY = T_ARRAY,
    IDT_ADDRESS = T_ADDRESS,
    IDT_METADATA = T_METADATA,
    // not really a data type, denotes the use when address value is needed
    // itself, and Address instance is not used to fetch actual data from memory
    IDT_LEA = 100,
    // multi-word memory access insn (ldmia/stmia etc)
    IDT_MULTIWORD
  } InsnDataType;

  inline static InsnDataType toInsnDataType(BasicType type) {
    return (InsnDataType)type;
  }

  // Return an equivalent address usable by instructions accessing 'type',
  // materializing into 'temp' when this one is out of range.
  Address safe_for(InsnDataType type, MacroAssembler *, Register temp);
  bool is_safe_for(InsnDataType);

  // Range/shift validity checks for the various load/store encodings.
  static bool offset_ok_for_immed(long offset, InsnDataType type);
  static bool shift_ok_for_index(shift_op shift, InsnDataType type);
};
538
539
// Convenience classes: literal Addresses pre-tagged with the right
// relocation type.

// Address of a runtime (stub/VM) call target.
class RuntimeAddress: public Address {
 public:
  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}
};
544
545
// Address of an oop; relocated so the GC can find and update it.
class OopAddress: public Address {
 public:
  OopAddress(address target) : Address(target, relocInfo::oop_type){}
};
549
550
// Address of data outside the code cache (C globals, card table, etc.).
class ExternalAddress: public Address {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:
  ExternalAddress(address target) : Address(target, reloc_for_target(target)) {}
};
563
564
// Address of a location inside the code cache (e.g. a stub or table).
class InternalAddress: public Address {
 public:
  InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
};
568
569
570
// Words needed to save the full FP register file (one word per register).
const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers;
571
572
class Assembler : public AbstractAssembler {
573
// Emit one 32-bit instruction word into the code buffer.
void emit_long(jint x) {
  AbstractAssembler::emit_int32(x);
}

public:
  //TODO REMOVE shift_kind from here once done
  enum shift_kind { LSL, LSR, ASR, ROR };
  // NOTE RRX is a special case of ROR with shift = 0#

  // Helper functions for shifts
  // Here to allow compiler to find global shift_op without :: prefix as lsl is a
  // standalone instruction
#define HELPER(NAME) \
  shift_op NAME(int sft = 0) { return ::NAME(sft); } \
  shift_op NAME(Register r) { return ::NAME(r); }
  HELPER(lsl);
  HELPER(lsr);
  HELPER(asr);
  HELPER(ror);
  shift_op rrx() { return ::rrx(); }
#undef HELPER

  // ARM condition codes; HS/CS and LO/CC are encoding aliases.
  typedef enum {
    EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV
  } Condition;

  // Every AArch32 (ARM-mode) instruction is 4 bytes.
  enum { instruction_size = 4 };

  // Canonical NOP: mov r0, r0.
  static const uint32_t nop_insn = 0xe1a00000;
// Build a pre- or post-indexed immediate address.
Address adjust(Register base, int offset, bool preIncrement) {
  if (preIncrement)
    return Address(base, offset, Address::pre);
  else
    return Address(base, offset, Address::post);
}

// Build a pre- or post-indexed register-offset address.
Address adjust(Register base, Register index, shift_op shift,
               enum Address::reg_op op, bool preIncrement) {
  return Address(base, index, shift, op, preIncrement ? Address::pre : Address::post);
}

// Pre-indexed forms (write-back before access).
Address pre(Register base, int offset) {
  return adjust(base, offset, true);
}

Address pre(Register base, Register index, shift_op shift, enum Address::reg_op op) {
  return adjust(base, index, shift, op, true);
}

// Post-indexed form (write-back after access).
Address post (Register base, int offset) {
  return adjust(base, offset, false);
}
// The instruction currently being assembled (set by the 'starti' macro);
// the field helpers below all forward to it.
Instruction_aarch32* current;

void set_current(Instruction_aarch32* i) { current = i; }

// Forwarders to the current instruction's field writers.
void f(unsigned val, int msb, int lsb) {
  current->f(val, msb, lsb);
}
void f(unsigned val, int msb) {
  current->f(val, msb, msb);
}
void sf(long val, int msb, int lsb) {
  current->sf(val, msb, lsb);
}
void rf(Register reg, int lsb) {
  current->rf(reg, lsb);
}
void rf(FloatRegister reg, int lsb) {
  current->rf(reg, lsb);
}
void fixed(unsigned value, unsigned mask) {
  current->fixed(value, mask);
}

// Emit the finished word; in debug builds verify every bit was written.
void emit() {
  emit_long(current->get_insn());
  assert_cond(current->get_bits() == 0xffffffff);
  current = NULL;
}
// Member-function-pointer types for instructions that can target a Label;
// wrap_label() either emits directly (label bound) or records a patch.
typedef void (Assembler::* uncond_branch_insn)(address dest);
typedef void (Assembler::* cond_branch_insn)(address dest, Condition cond);
typedef void (Assembler::* cond_ldst_insn)(Register Rt, address dest, Condition cond);
typedef void (Assembler::* cond_fp_ldst_insn)(FloatRegister Vd, address dest, Condition cond);

void wrap_label(Label &L, uncond_branch_insn insn);
void wrap_label(Label &L, Condition cond, cond_branch_insn insn);
void wrap_label(Register r, Label &L, Condition cond, cond_ldst_insn insn);
void wrap_label(FloatRegister r, Label &L, Condition cond, cond_fp_ldst_insn insn);

#undef INSN

// AARCH32 Instructions
// Defined roughly in the order they are found in
// ARM Architecture Reference Manual, section 5

// Register fields that encode as all-zeros / all-ones in compare and
// move encodings (SBZ/SBO positions).
#define ZERO_ADDR_REG r0
#define ONES_ADDR_REG r15
// Data processing (register & register-shifted-register)

// Common fields for the immediate-shift register form.
void reg_instr(int decode, shift_op shift, Condition cond, bool s) {
  f(cond, 31, 28), f(0b000, 27, 25), f(decode, 24, 21), f(s, 20);
  f(shift.shift(), 11, 7), f(shift.kind(), 6, 5), f(0, 4);
}
// Common fields for the register-shifted-register form (bit 4 set).
void reg_shift_reg_instr(int decode, enum shift_op::shift_kind kind,
                         Condition cond, bool s) {
  f(cond, 31, 28), f(0b000, 27, 25), f(decode, 24, 21), f(s, 20);
  f(0, 7), f(kind, 6, 5), f(1, 4);
}

// Three-operand data-processing instructions; the s_flg variants set the
// condition flags.
#define INSN(NAME, decode, s_flg) \
  void NAME(Register Rd, Register Rn, Register Rm, shift_op shift = S_DFLT, \
            Condition cond = C_DFLT) { \
    starti; \
    if(shift.is_register()) { \
      reg_shift_reg_instr(decode, shift.kind(), cond, s_flg); \
      rf(Rn, 16), rf(Rd, 12), rf(shift.reg(), 8), rf(Rm, 0); \
    } else { \
      reg_instr(decode, shift, cond, s_flg); \
      rf(Rn, 16), rf(Rd, 12), rf(Rm, 0); \
    } \
  }
  INSN(andr, 0b0000, 0);
  INSN(eor, 0b0001, 0);
  INSN(sub, 0b0010, 0);
  INSN(rsb, 0b0011, 0);
  INSN(add, 0b0100, 0);
  INSN(adc, 0b0101, 0);
  INSN(sbc, 0b0110, 0);
  INSN(rsc, 0b0111, 0);
  INSN(orr, 0b1100, 0);
  INSN(bic, 0b1110, 0);

  INSN(ands, 0b0000, 1);
  INSN(eors, 0b0001, 1);
  INSN(subs, 0b0010, 1);
  INSN(rsbs, 0b0011, 1);
  INSN(adds, 0b0100, 1);
  INSN(adcs, 0b0101, 1);
  INSN(sbcs, 0b0110, 1);
  INSN(rscs, 0b0111, 1);
  INSN(orrs, 0b1100, 1);
  INSN(bics, 0b1110, 1);

#undef INSN
722
// Compare/test instructions: no destination register (Rd field is zero),
// S bit always set since their only effect is on the flags.
#define INSN(NAME, decode) \
  void NAME(Register Rn, Register Rm, Condition cond) { \
    NAME(Rn, Rm, S_DFLT, cond); \
  } \
  void NAME(Register Rn, Register Rm, shift_op shift = S_DFLT, \
            Condition cond = C_DFLT) { \
    starti; \
    if(shift.is_register()) { \
      reg_shift_reg_instr(decode, shift.kind(), cond, true); \
      rf(Rn, 16), f(0b0000, 15, 12), rf(shift.reg(), 8), rf(Rm, 0); \
    } else { \
      reg_instr(decode, shift, cond, true); \
      rf(Rn, 16), f(0, 15, 12), rf(Rm, 0); \
    } \
  }
  INSN(tst, 0b1000);
  INSN(teq, 0b1001);
  INSN(cmp, 0b1010);
  INSN(cmn, 0b1011);
#undef INSN
743
// TODO appears that if Rd = 15 and s flag set then perhaps different method
// Shared encoder for MOV/MVN (Rn field is zero in these encodings).
void mov_internal(int decode, Register Rd, Register Rnm, shift_op shift, bool s, Condition cond) {
  starti;
  if(shift.is_register()) {
    reg_shift_reg_instr(decode, shift.kind(), cond, s);
    f(0b0000, 19, 16), rf(Rd, 12), rf(shift.reg(), 8), rf(Rnm, 0);
  } else {
    reg_instr(decode, shift, cond, s);
    f(0, 19, 16), rf(Rd, 12), rf(Rnm, 0);
  }
}
void mov(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) {
  mov_internal(0b1101, Rd, Rm, shift, false, cond);
}
void movs(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) {
  mov_internal(0b1101, Rd, Rm, shift, true, cond);
}
void mov(Register Rd, Register Rm, Condition cond = C_DFLT) {
  mov_internal(0b1101, Rd, Rm, S_DFLT, false, cond);
}
void movs(Register Rd, Register Rm, Condition cond = C_DFLT) {
  mov_internal(0b1101, Rd, Rm, S_DFLT, true, cond);
}

// MVN: bitwise NOT of the (optionally shifted) operand.
void mvn(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) {
  mov_internal(0b1111, Rd, Rm, shift, false, cond);
}
void mvns(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) {
  mov_internal(0b1111, Rd, Rm, shift, true, cond);
}
void mvn(Register Rd, Register Rm, Condition cond = C_DFLT) {
  mov_internal(0b1111, Rd, Rm, S_DFLT, false, cond);
}
void mvns(Register Rd, Register Rm, Condition cond = C_DFLT) {
  mov_internal(0b1111, Rd, Rm, S_DFLT, true, cond);
}
780
// Immediate-shift mnemonics (lsl/lsr/asr/ror Rd, Rm, #imm), expressed as
// MOV with a shifted operand.  ror #0 is rejected because that encoding
// means RRX.
#define INSN(NAME, type, s_flg, ASSERTION) \
  void NAME(Register Rd, Register Rm, unsigned shift, Condition cond = C_DFLT) { \
    assert_cond(ASSERTION); \
    if(s_flg) movs(Rd, Rm, shift_op(type, shift), cond); \
    else mov(Rd, Rm, shift_op(type, shift), cond); \
  }
  INSN(lsl, shift_op::LSL, 0, true);
  INSN(lsr, shift_op::LSR, 0, true);
  INSN(asr, shift_op::ASR, 0, true);
  INSN(ror, shift_op::ROR, 0, shift != 0); //shift == 0 => RRX

  INSN(lsls, shift_op::LSL, 1, true);
  INSN(lsrs, shift_op::LSR, 1, true);
  INSN(asrs, shift_op::ASR, 1, true);
  INSN(rors, shift_op::ROR, 1, shift != 0); //shift == 0 => RRX
#undef INSN
797
#define INSN(NAME, type, s_flg) \
798
void NAME(Register Rd, Register Rm, Condition cond = C_DFLT) { \
799
if(s_flg) movs(Rd, Rm, shift_op(type, 0), cond); \
800
else mov(Rd, Rm, shift_op(type, 0), cond); \
801
}
802
INSN(rrx, shift_op::LSR, 0);
803
INSN(rrxs, shift_op::LSR, 1);
804
#undef INSN
805
806
//Data processing (register-shifted-register)
// Register-amount shift mnemonics (lsl/lsr/asr/ror Rd, Rn, Rm), expressed
// as MOV with a register-shifted operand.
#define INSN(NAME, type, s_flg) \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    if(s_flg) movs(Rd, Rn, shift_op(type, Rm), cond); \
    else mov(Rd, Rn, shift_op(type, Rm), cond); \
  }
  INSN(lsl, shift_op::LSL, 0);
  INSN(lsr, shift_op::LSR, 0);
  INSN(asr, shift_op::ASR, 0);
  INSN(ror, shift_op::ROR, 0);

  INSN(lsls, shift_op::LSL, 1);
  INSN(lsrs, shift_op::LSR, 1);
  INSN(asrs, shift_op::ASR, 1);
  INSN(rors, shift_op::ROR, 1);
#undef INSN
823
// Emit a data-processing instruction with a modified-immediate operand.
// Returns false (emitting nothing) when 'imm' cannot be expressed as an
// ARM rotated 8-bit immediate.
bool imm_instr(int decode, Register Rd, Register Rn, int imm, Condition cond,
               bool s) {
  if(!is_valid_for_imm12(imm))
    return false;
  {
    // Inner scope so the starti temporary emits before we return.
    starti;
    f(cond, 31, 28), f(0b001, 27, 25), f(decode, 24, 21), f(s, 20), rf(Rn, 16);
    int imm12 = encode_imm12(imm);
    rf(Rd, 12), f(imm12, 11, 0);
  }
  return true;
}

// Logical operations with an immediate operand; asserts the immediate is
// encodable.
#define INSN(NAME, decode, s_flg) \
  inline void NAME(Register Rd, Register Rn, unsigned imm, Condition cond = C_DFLT) {\
    bool status = imm_instr(decode, Rd, Rn, imm, cond, s_flg); \
    assert(status, "invalid imm"); \
  }
  INSN(andr, 0b0000, 0);
  INSN(eor, 0b0001, 0);
  INSN(orr, 0b1100, 0);
  INSN(bic, 0b1110, 0);

  INSN(ands, 0b0000, 1);
  INSN(eors, 0b0001, 1);
  INSN(orrs, 0b1100, 1);
  INSN(bics, 0b1110, 1);
  //NOTE: arithmetic immediate instructions are defined below to allow dispatch.
#undef INSN
protected:
  // Mov data to destination register in the shortest number of instructions
  // possible.
  void mov_immediate(Register dst, uint32_t imm32, Condition cond, bool s);
  // Mov data to destination register but always emit enough instructions that would
  // permit any 32-bit constant to be loaded. (Allow for rewriting later).
  void mov_immediate32(Register dst, uint32_t imm32, Condition cond, bool s);

  // Add/sub with an immediate that may need materializing; defined in the
  // .cpp file.
  void add_sub_imm(int decode, Register Rd, Register Rn, int imm,
                   Condition cond, bool s);
public:
  // Arithmetic instructions with immediate, RegisterOrConstant, or
  // temp-register-materialized operands; overloads for each integer type
  // avoid ambiguous conversions at call sites.
#define INSN(NAME, decode, s_flg) \
  inline void NAME(Register Rd, Register Rn, int imm, Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  inline void NAME(Register Rd, Register Rn, unsigned imm, \
                   Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  inline void NAME(Register Rd, Register Rn, long imm, Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  inline void NAME(Register Rd, Register Rn, unsigned long imm, \
                   Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  /*Addition dispatch - place in macroassembler?*/ \
  void NAME(Register Rd, Register Rn, RegisterOrConstant operand, \
            Condition cond = C_DFLT) { \
    if(operand.is_register()) { \
      NAME(Rd, Rn, (Register)operand.as_register(), lsl(), cond); \
    } else { \
      NAME(Rd, Rn, (unsigned)operand.as_constant(), cond); \
    } \
  } \
  inline void NAME(Register Rd, Register Rn, unsigned imm, Register Rtmp, \
                   Condition cond = C_DFLT) { \
    if (Assembler::operand_valid_for_add_sub_immediate(imm)) \
      NAME(Rd, Rn, imm, cond); \
    else { \
      mov_immediate(Rtmp, imm, cond, false); \
      NAME(Rd, Rn, Rtmp, cond); \
    } \
  } \
  //Note that the RegisterOrConstant version can't take a shift even though
  // one of the instructions dispatched to can
  INSN(sub, 0b0010, 0);
  INSN(rsb, 0b0011, 0);
  INSN(add, 0b0100, 0);
  INSN(adc, 0b0101, 0);
  INSN(sbc, 0b0110, 0);
  INSN(rsc, 0b0111, 0);

  INSN(subs, 0b0010, 1);
  INSN(rsbs, 0b0011, 1);
  INSN(adds, 0b0100, 1);
  INSN(adcs, 0b0101, 1);
  INSN(sbcs, 0b0110, 1);
  INSN(rscs, 0b0111, 1);
#undef INSN
//No need to do reverse as register subtracted from immediate

// alias for mvn
void inv(Register Rd, Register Rn, Condition cond = C_DFLT) {
  mvn(Rd, Rn, cond);
}
//alias for rsb: negation is 0 - Rn.
void neg(Register Rd, Register Rn, Condition cond = C_DFLT) {
  rsb(Rd, Rn, 0, cond);
}
void negs(Register Rd, Register Rn, Condition cond = C_DFLT) {
  rsbs(Rd, Rn, 0, cond);
}
927
// PC-rel. addressing
// Load Rd with PC+imm.  When imm fits a modified immediate a single
// add/sub suffices; otherwise the offset is materialized with a constant
// sequence whose own length (8 or 16 bytes, depending on whether movw/movt
// is available) must be subtracted so the final add of PC lands on target.
void adr_encode(Register Rd, int imm, Condition cond) {
  if (is_valid_for_imm12(imm) || is_valid_for_imm12(-imm)) {
    add_sub_imm(0b0100, Rd, r15_pc, imm, cond, false); //opcode for add
  } else {
    int adjust = 0;
    if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
      adjust = 8; // mov_w/mov_t
    } else {
      adjust = 16; // mov and 3 orr
    }
    mov_immediate32(Rd, imm - adjust, cond, false);
    add(Rd, r15_pc, Rd, cond);
  }
}
943
void adr(Register Rd, address dest, Condition cond = C_DFLT);
944
945
void adr(Register Rd, const Address &dest, Condition cond = C_DFLT);
946
947
void adr(Register Rd, Label &L, Condition cond = C_DFLT) {
948
wrap_label(Rd, L, cond, &Assembler::Assembler::adr);
949
}
950
951
private:
  // The self-test builds instructions directly via these internals.
  friend void entry(CodeBuffer *cb);
  // MOV/MVN with a modified-immediate operand (Rn field must be zero).
#define INSN(NAME, decode, s_flg) \
  inline void NAME(Register Rd, unsigned imm, Condition cond = C_DFLT) { \
    bool status = imm_instr(decode, Rd, ZERO_ADDR_REG, imm, cond, s_flg); \
    assert(status, "invalid imm"); \
  } \
  inline void NAME(Register Rd, int imm, Condition cond = C_DFLT) { \
    bool status = imm_instr(decode, Rd, ZERO_ADDR_REG, imm, cond, s_flg); \
    assert(status, "invalid imm"); \
  }
  INSN(mov_i, 0b1101, 0);
  INSN(mvn_i, 0b1111, 0);

  INSN(movs_i, 0b1101, 1);
  INSN(mvns_i, 0b1111, 1);
#undef INSN
969
// MOVW: write a 16-bit immediate into the low half of Rd, zeroing the top
// half. Immediate is split across bits 19:16 (imm4) and 11:0 (imm12).
void movw_i(Register Rd, unsigned imm, Condition cond = C_DFLT) {
  starti;
  assert(imm < (1 << 16), "Immediate too big for movw");
  f(cond, 31, 28), f(0b00110000, 27, 20), f(imm >> 12, 19, 16);
  rf(Rd, 12), f(imm & 0xfff, 11, 0);
}

// MOVT: write a 16-bit immediate into the top half of Rd, leaving the low
// half unchanged. Same immediate split as movw_i.
void movt_i(Register Rd, unsigned imm, Condition cond = C_DFLT) {
  starti;
  assert(imm < (1 << 16), "Immediate too big for movt");
  f(cond, 31, 28), f(0b00110100, 27, 20), f(imm >> 12, 19, 16);
  rf(Rd, 12), f(imm & 0xfff, 11, 0);
}
982
public:
983
984
// Compare/test with an immediate (TST/TEQ/CMP/CMN). These set only the
// condition flags (S bit forced true) and have no destination register,
// hence ZERO_ADDR_REG in the Rd field. The Rtmp overloads fall back to
// materializing an unencodable immediate in the scratch register first.
#define INSN(NAME, decode)                                                    \
  inline void NAME(Register Rn, int imm, Condition cond = C_DFLT) {           \
    bool status = imm_instr(decode, ZERO_ADDR_REG, Rn, imm, cond, true);      \
    assert(status, "invalid imm");                                            \
  }                                                                           \
  inline void NAME(Register Rn, unsigned imm, Condition cond = C_DFLT) {      \
    bool status = imm_instr(decode, ZERO_ADDR_REG, Rn, imm, cond, true);      \
    assert(status, "invalid imm");                                            \
  }                                                                           \
  inline void NAME(Register Rn, int imm, Register Rtmp, Condition cond = C_DFLT) { \
    if (Assembler::operand_valid_for_add_sub_immediate(imm))                  \
      NAME(Rn, imm, cond);                                                    \
    else {                                                                    \
      mov_immediate(Rtmp, imm, cond, false);                                  \
      NAME(Rn, Rtmp, cond);                                                   \
    }                                                                         \
  }                                                                           \
  inline void NAME(Register Rn, unsigned imm, Register Rtmp, Condition cond = C_DFLT) { \
    if (Assembler::operand_valid_for_add_sub_immediate(imm))                  \
      NAME(Rn, imm, cond);                                                    \
    else {                                                                    \
      mov_immediate(Rtmp, imm, cond, false);                                  \
      NAME(Rn, Rtmp, cond);                                                   \
    }                                                                         \
  }
INSN(tst, 0b1000);
INSN(teq, 0b1001);
INSN(cmp, 0b1010);
INSN(cmn, 0b1011);
#undef INSN
1014
1015
1016
// Multiply and multiply accumulate
// Shared encoder: registers a/b/c/d land in bits 19:16, 15:12, 11:8 and
// 3:0; bits 7:4 carry the fixed 0b1001 multiply signature; s is the
// flag-setting bit (20).
void mult_instr(int decode, Register a, Register b, Register c,
                Register d, Condition cond, bool s) {
  starti;
  f(cond, 31, 28), f(0b0000, 27, 24), f(decode, 23, 21), f(s, 20);
  rf(a, 16), rf(b, 12), rf(c, 8), rf(d, 0), f(0b1001, 7, 4);
}

// MUL: Rd = Rn * Rm (low 32 bits). The accumulator field must be zero.
void mul(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) {
  mult_instr(0b000, Rd, ZERO_ADDR_REG, Rm, Rn, cond, false);
}
void muls(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) {
  mult_instr(0b000, Rd, ZERO_ADDR_REG, Rm, Rn, cond, true);
}

// MLA: Rd = Rn * Rm + Ra (and flag-setting variant).
void mla(Register Rd, Register Rn, Register Rm, Register Ra, Condition cond = C_DFLT) {
  mult_instr(0b001, Rd, Ra, Rm, Rn, cond, false);
}
void mlas(Register Rd, Register Rn, Register Rm, Register Ra, Condition cond = C_DFLT) {
  mult_instr(0b001, Rd, Ra, Rm, Rn, cond, true);
}

// MLS: Rd = Ra - Rn * Rm.
void mls(Register Rd, Register Rn, Register Rm, Register Ra, Condition cond = C_DFLT) {
  mult_instr(0b011, Rd, Ra, Rm, Rn, cond, false);
}

// UMAAL: RdHi:RdLo = Rn * Rm + RdHi + RdLo (unsigned 64-bit accumulate).
void umaal(Register RdLo, Register RdHi, Register Rn, Register Rm, Condition cond = C_DFLT) {
  mult_instr(0b010, RdHi, RdLo, Rm, Rn, cond, false);
}

// 32x32 -> 64-bit multiplies and accumulates, signed and unsigned, with
// flag-setting (s_flg) variants. Result is RdHi:RdLo.
#define INSN(NAME, decode, s_flg)                                             \
  void NAME(Register RdLo, Register RdHi, Register Rn, Register Rm,           \
            Condition cond = C_DFLT) {                                        \
    mult_instr(decode, RdHi, RdLo, Rm, Rn, cond, s_flg);                      \
  }
INSN(umull, 0b100, 0);
INSN(umlal, 0b101, 0);
INSN(smull, 0b110, 0);
INSN(smlal, 0b111, 0);

INSN(umulls, 0b100, 1);
INSN(umlals, 0b101, 1);
INSN(smulls, 0b110, 1);
INSN(smlals, 0b111, 1);

#undef INSN
1062
1063
//Saturating addition and subtraction
// QADD/QSUB/QDADD/QDSUB: signed saturating arithmetic (results clamp to
// the 32-bit signed range instead of wrapping).
#define INSN(NAME, decode)                                                    \
  void NAME(Register Rd, Register Rm, Register Rn, Condition cond = C_DFLT) { \
    starti;                                                                   \
    f(cond, 31, 28), f( 0b00010, 27, 23), f(decode, 22, 21), f(0, 20);        \
    rf(Rn, 16), rf(Rd, 12), f( 0b00000101, 11, 4), rf(Rm, 0);                 \
  }
INSN(qadd,  0b00);
INSN(qsub,  0b01);
INSN(qdadd, 0b10);
INSN(qdsub, 0b11);
#undef INSN
1075
1076
// Halfword multiply and multiply accumulate
// Shared encoder; the N and M bits (5 and 6) select the top or bottom
// halfword of the corresponding source operand.
void mul_instr(int decode, Register Ra, Register Rb, Register Rc, Register Rd,
               bool N, bool M, Condition cond) {
  starti;
  f(cond, 31, 28), f(0b00010, 27, 23), f(decode, 22, 21), f(0, 20);
  rf(Ra, 16), rf(Rb, 12), rf(Rc, 8), f(1, 7), f(M, 6), f(N, 5), f(0, 4);
  rf(Rd, 0);
}

// SMLA<x><y>/SMLAW<y>: signed halfword multiply-accumulate into Rd.
#define INSN(NAME, decode, N, M)                                              \
  void NAME(Register Rd, Register Rn, Register Rm, Register Ra,               \
            Condition cond = C_DFLT) {                                        \
    mul_instr(decode, Rd, Ra, Rm, Rn, N, M, cond);                            \
  }
INSN(smlabb, 0b00, 0, 0);
INSN(smlabt, 0b00, 0, 1)
INSN(smlatb, 0b00, 1, 0)
INSN(smlatt, 0b00, 1, 1)

INSN(smlawb, 0b01, 0, 0);
INSN(smlawt, 0b01, 0, 1);
#undef INSN

// SMLAL<x><y>: signed halfword multiply accumulated into RdHi:RdLo.
#define INSN(NAME, decode, N, M)                                              \
  void NAME(Register RdLo, Register RdHi, Register Rn, Register Rm,           \
            Condition cond = C_DFLT) {                                        \
    mul_instr(decode, RdHi, RdLo, Rm, Rn, N, M, cond);                        \
  }
INSN(smlalbb, 0b10, 0, 0);
INSN(smlalbt, 0b10, 0, 1);
INSN(smlaltb, 0b10, 1, 0);
INSN(smlaltt, 0b10, 1, 1);
#undef INSN

// SMUL<x><y>/SMULW<y>: signed halfword multiply with no accumulator (the
// accumulator register field must be zero).
#define INSN(NAME, decode, N, M)                                              \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    mul_instr(decode, Rd, ZERO_ADDR_REG, Rm, Rn, N, M, cond);                 \
  }
INSN(smulwb, 0b01, 1, 0);
INSN(smulwt, 0b01, 1, 1);

INSN(smulbb, 0b11, 0, 0);
INSN(smulbt, 0b11, 0, 1);
INSN(smultb, 0b11, 1, 0);
INSN(smultt, 0b11, 1, 1);
#undef INSN
1122
1123
// For Extra load/store instructions, see load/store section
1124
// For Synchronization primitives, see load/store section
1125
1126
// MSR(immediate), and hints
// Architected hint instructions (NOP/YIELD/WFE/WFI/SEV); the hint number
// occupies bits 7:0.
#define INSN(NAME, decode)                                                    \
  void NAME(Condition cond = C_DFLT) {                                        \
    starti;                                                                   \
    f(cond, 31, 28), f(0b001100100000, 27, 16), f(0b11110000, 15, 8);         \
    f(decode, 7, 0);                                                          \
  }
INSN(nop,   0b000);
INSN(yield, 0b001);
INSN(wfe,   0b010);
INSN(wfi,   0b011);
INSN(sev,   0b100);
1138
void dbg(int dbg_hint, Condition cond = C_DFLT) {
1139
f(cond, 31, 28), f(0b001100100000, 27, 16), f(0b11110000, 15, 8);
1140
f(0b1111, 7, 4); f(dbg_hint, 3, 0);
1141
}
1142
#undef INSN
1143
1144
//TODO Misc instructions
// BKPT: breakpoint. The 16-bit immediate is split across bits 19:8 and
// 3:0; always unconditional (cond field forced to AL).
void bkpt(unsigned imm) {
  starti;
  f(AL, 31, 28), f(0b00010010, 27, 20);
  f(imm >> 4, 19, 8), f(0b0111, 7, 4), f(imm & 0xf, 3, 0);
}
// "HLT" here is emitted as BKPT (see FIXME).
void hlt(unsigned imm) {
  bkpt(imm);
  // FIXME This seemed like the best option!
}
1154
1155
// Load/store register (all modes)
// Generic single-register load/store encoder: op goes to bits 27:25, the
// a/b bits to 22 and 20, and op2 (when non-negative) to bits 7:4 for the
// extra-load/store encodings. Addressing-mode fields are filled in by
// adr.encode().
void load_store_instr(Register Rt, const Address &adr, int op, int op2, int a, int b,
                      Condition cond) {
  starti;
  f(cond, 31, 28), f(op, 27, 25), f(a, 22), f(b, 20);
  if(op2 >= 0)
    f(op2, 7, 4);
  //Destination
  rf(Rt, 12);
  adr.encode(current, code_section(), pc());
}

// Whether a pc-relative access to dest fits the immediate offset field of
// the given load/store encoding group (12 bits for the 0b010 group, 8
// bits for the extra-load/store 0b000 group).
bool encodeable(int decode, address dest) {
  long offset = dest - pc();
  switch(decode) {
  case 0b010:
    // LDR, LDRB, STR, STRB
    return uabs(offset) < (1 << 12);
  case 0b000:
    //LDRD, LDRH, LDRSB, LDRSH, STRH, STRD
    return uabs(offset) < (1 << 8);
  default:
    ShouldNotReachHere();
  }
  return false;
}
1181
1182
1183
1184
// Extends a NAME(Rt, Address) emitter with raw-address, reloc and Label
// overloads:
//  - Plan A: pc-relative form when the target is in range;
//  - Plan B (loads only): materialize the absolute address in Rt and load
//    through it;
//  - out-of-range literal stores cannot be synthesized (no Plan C).
// NOTE: INSN_INT is intentionally not #undef'd here; it is reused by the
// ldrd/strd definitions further down and #undef'd there.
#define INSN_INT(NAME, op, op2, a, b, isload)                                 \
  void NAME(Register Rt, address dest, Condition cond = C_DFLT) {             \
    if(encodeable(op, dest)) { /* Plan A */                                   \
      long offset = dest - pc();                                              \
      NAME(Rt, Address(r15_pc, offset), cond);                                \
    } else if(isload){ /* Plan B */                                           \
      /* TODO check we don't have to relocate this*/                          \
      mov_immediate(Rt, (uint32_t)dest, cond, false);                         \
      NAME(Rt, Address(Rt, 0), cond);                                         \
    } else { /* There is no plan C */                                         \
      ShouldNotReachHere();                                                   \
    }                                                                         \
  }                                                                           \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype,            \
            Condition cond = C_DFLT) {                                        \
    guarantee(rtype == relocInfo::internal_word_type,                         \
              "only internal_word_type relocs make sense here");              \
    NAME(Rt, InternalAddress(dest), cond);                                    \
  }                                                                           \
  void NAME(Register Rt, Label &L, Condition cond = C_DFLT) {                 \
    wrap_label(Rt, L, cond, &Assembler::NAME);                                \
  }

// Single-register loads/stores. op selects the encoding group, op2 the
// extra-load/store signature (or -1 for none), a/b the byte/load bits.
#define INSN(NAME, op, op2, a, b, isload)                                     \
  void NAME(Register Rt, const Address &adr, Condition cond = C_DFLT) {       \
    load_store_instr(Rt, adr, op, op2, a, b, cond);                           \
  }                                                                           \
  INSN_INT(NAME, op, op2, a, b, isload);
INSN(ldr,   0b010,     -1, 0, 1, 1);
INSN(ldrb,  0b010,     -1, 1, 1, 1);

INSN(ldrsb, 0b000, 0b1101, 0, 1, 1);
INSN(ldrh,  0b000, 0b1011, 0, 1, 1);
INSN(ldrsh, 0b000, 0b1111, 0, 1, 1);

INSN(str,   0b010,     -1, 0, 0, 0);
INSN(strb,  0b010,     -1, 1, 0, 0);
INSN(strh,  0b000, 0b1011, 0, 0, 0);
//Note LDRD & STRD are defined with the load/store multiple instructions

//TODO Need to introduce ldrsb ldrsh - then check that the encoding works properly!
#undef INSN
1226
1227
1228
//Synchronization primitives
// Shared LDREX/STREX-family encoder: decode in bits 23:20, fixed 0b1001
// signature in bits 7:4. Unused register fields must be all-ones.
void sync_instr(int decode, Register Ra, Register Rb, Register Rc, Register Rd,
                Condition cond) {
  starti;
  f(cond, 31, 28), f(0b0001, 27, 24), f(decode, 23, 20), rf(Ra, 16), rf(Rb, 12);
  rf(Rc, 8), f(0b1001, 7, 4), rf(Rd, 0);
}

// STREX{,D,B,H} Rd, Rt, [Rn]: store-exclusive; Rd receives the success
// status (0 on success).
#define INSN(NAME, decode)                                                    \
  void NAME(Register Rd, Register Rt, Register Rn, Condition cond = C_DFLT) { \
    assert(r15_pc != Rn, "Unpredictable");                                    \
    sync_instr(decode, Rn, Rd, ONES_ADDR_REG, Rt, cond);                      \
  }
INSN( strex, 0b1000);
INSN(strexd, 0b1010);
INSN(strexb, 0b1100);
INSN(strexh, 0b1110);
#undef INSN

// LDREX{,D,B,H} Rt, [Rn]: load-exclusive.
#define INSN(NAME, decode)                                                    \
  void NAME(Register Rt, Register Rn, Condition cond = C_DFLT) {              \
    assert(r15_pc != Rn, "Unpredictable");                                    \
    sync_instr(decode, Rn, Rt, ONES_ADDR_REG, ONES_ADDR_REG, cond);           \
  }
INSN(ldrex,  0b1001);
INSN(ldrexd, 0b1011);
INSN(ldrexb, 0b1101);
INSN(ldrexh, 0b1111);
#undef INSN
1257
1258
// Media instructions
// Common media-class prefix: bits 27:25 = 0b011, decode in 24:20, decode2
// in 7:5, bit 4 set. NOTE: deliberately does not call starti - every
// caller does so itself before filling the remaining fields.
void media_instr(int decode, int decode2, Condition cond) {
  f(cond, 31, 28), f(0b011, 27, 25), f(decode, 24, 20);
  f(decode2, 7, 5), f(1, 4);
}

// Parallel add/subtract on packed halfwords and bytes. The decode value
// selects signed (0b01x)/unsigned (0b1xx) and plain/saturating/halving
// families; decode2 selects the operation within the family.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    starti;                                                                   \
    media_instr(0b00000 | decode, decode2, cond);                             \
    rf(Rn, 16), rf(Rd, 12), f(0b1111, 11, 8), rf(Rm, 0);                      \
  }
INSN(sadd16, 0b01, 0b000);
INSN(sasx,   0b01, 0b001);
INSN(ssax,   0b01, 0b010);
INSN(ssub16, 0b01, 0b011);
INSN(sadd8,  0b01, 0b100);
INSN(ssub8,  0b01, 0b111);
//Saturating
INSN(qadd16, 0b10, 0b000);
INSN(qasx,   0b10, 0b001);
INSN(qsax,   0b10, 0b010);
INSN(qsub16, 0b10, 0b011);
INSN(qadd8,  0b10, 0b100);
INSN(qsub8,  0b10, 0b111);
//Halving
INSN(shadd16, 0b11, 0b000);
INSN(shasx,   0b11, 0b001);
INSN(shsax,   0b11, 0b010);
INSN(shsub16, 0b11, 0b011);
INSN(shadd8,  0b11, 0b100);
INSN(shsub8,  0b11, 0b111);

//Now unsigned
INSN(uadd16, 0b101, 0b000);
INSN(uasx,   0b101, 0b001);
INSN(usax,   0b101, 0b010);
INSN(usub16, 0b101, 0b011);
INSN(uadd8,  0b101, 0b100);
INSN(usub8,  0b101, 0b111);
//Saturating
INSN(uqadd16, 0b110, 0b000);
INSN(uqasx,   0b110, 0b001);
INSN(uqsax,   0b110, 0b010);
INSN(uqsub16, 0b110, 0b011);
INSN(uqadd8,  0b110, 0b100);
INSN(uqsub8,  0b110, 0b111);
//Halving
INSN(uhadd16, 0b111, 0b000);
INSN(uhasx,   0b111, 0b001);
INSN(uhsax,   0b111, 0b010);
INSN(uhsub16, 0b111, 0b011);
INSN(uhadd8,  0b111, 0b100);
INSN(uhsub8,  0b111, 0b111);
#undef INSN
1313
1314
//Packing, unpacking, saturation and reversal
// Note rotation can only be one of ROR #0 ROR #8 ROR #16 ROR #24
// Shared encoder for the extend/reverse group; the rotation amount is
// encoded in bits 11:10.
void extend_instr(int decode, int decode2, int decode3, Register Rd, Register Rn,
                  Register Rm, shift_op shift, Condition cond) {
  starti;
  assert(0 == shift.shift() ||
         shift_op::ROR == shift.kind(), "Only ROR may be used for op");
  // All zero shifts are mapped to LSL #0
  int shift_enc = 0;
  switch(shift.shift()) {
    case 0: break;
    case 8: shift_enc = 1; break;
    case 16: shift_enc = 2; break;
    case 24: shift_enc = 3; break;
    default: assert(false, "Invalid shift quantity");
  }
  media_instr(0b01000 | decode, decode2, cond);
  rf(Rn, 16), rf(Rd, 12), f(shift_enc, 11, 10), f(decode3, 9, 8), rf(Rm, 0);
}

// SXTA*/UXTA*: extend a (rotated) byte/halfword of Rm and add it to Rn.
// Rn == pc would select the non-accumulating SXT*/UXT* encoding instead.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rn, Register Rm, shift_op shift = ::ror(),  \
            Condition cond = C_DFLT) {                                        \
    assert(0xf != Rn->encoding_nocheck(), "Rn = pc makes different instruction"); \
    extend_instr(decode, decode2, 0b00, Rd, Rn, Rm, shift, cond);             \
  }
INSN(sxtab16, 0b000, 0b011);
INSN(sxtab,   0b010, 0b011);
INSN(sxtah,   0b011, 0b011);
INSN(uxtab16, 0b100, 0b011);
INSN(uxtab,   0b110, 0b011);
INSN(uxtah,   0b111, 0b011);
#undef INSN

// SXT*/UXT*: plain extend; the all-ones Rn field selects the
// non-accumulating form.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rm, shift_op shift = ::ror(),               \
            Condition cond = C_DFLT) {                                        \
    extend_instr(decode, decode2, 0b00, Rd, ONES_ADDR_REG, Rm, shift, cond);  \
  }
INSN(sxtb16, 0b000, 0b011);
INSN(sxtb,   0b010, 0b011);
INSN(sxth,   0b011, 0b011);
INSN(uxtb16, 0b100, 0b011);
INSN(uxtb,   0b110, 0b011);
INSN(uxth,   0b111, 0b011);
#undef INSN

//Reverse instructions
// REV/REV16/RBIT/REVSH byte/bit reversal.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rm, Condition cond = C_DFLT) {              \
    extend_instr(decode, decode2, 0b11, Rd, ONES_ADDR_REG, Rm, ::ror(24), cond); \
  }
INSN(rev,   0b011, 0b001);
INSN(rev16, 0b011, 0b101);
INSN(rbit,  0b111, 0b001);
INSN(revsh, 0b111, 0b101);
#undef INSN
1371
1372
// Signed multiply, signed and unsigned divide
// SDIV/UDIV: hardware divide, Rd = Rn / Rm. The Ra field must be all-ones.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    starti;                                                                   \
    media_instr(0b10000 | decode, decode2, cond);                             \
    rf(Rd, 16), f(0b1111, 15, 12), rf(Rm, 8), rf(Rn, 0);                      \
  }
INSN(sdiv, 0b001, 0b000);
INSN(udiv, 0b011, 0b000);
//TODO ALL THE REST!
#undef INSN

// Remainder of things
//TODO USAD8
// SBFX/UBFX: extract a width-bit field starting at bit lsb of Rn into Rd,
// sign-extended (sbfx) or zero-extended (ubfx). widthm1 goes in bits
// 20:16, lsb in 11:7.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rn, int lsb, int width,                     \
            Condition cond = C_DFLT) {                                        \
    starti;                                                                   \
    assert(lsb >= 0 && lsb < 32, "lsb out of range");                         \
    assert(width > 0 && width <= 32 - lsb, "width out of range");             \
    media_instr(decode, decode2, cond);                                       \
    f(width - 1, 20, 16), rf(Rd, 12), f(lsb, 11, 7), rf(Rn, 0);               \
  }
INSN(sbfx, 0b11010, 0b010);
INSN(ubfx, 0b11110, 0b010);
#undef INSN
1398
1399
// BFI: insert the low 'width' bits of Rn into Rd starting at bit 'lsb',
// leaving the other bits of Rd unchanged. ARMv6T2/ARMv7 only.
void bfi(Register Rd, Register Rn, int lsb, int width, Condition cond = C_DFLT) {
  assert(VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7), "unsupported on the cpu");
  int msb = lsb + width - 1;
  assert(lsb >= 0 && lsb < 32, "lsb out of range");
  assert(msb < 32 && msb >= lsb, "width out of range");
  starti;
  media_instr(0b11100, 0b000, cond);
  f(msb, 20, 16), rf(Rd, 12), f(lsb, 11, 7), rf(Rn, 0);
}

// BFC: clear 'width' bits of Rd starting at bit 'lsb' (the BFI encoding
// with the source register field all-ones). ARMv6T2/ARMv7 only.
void bfc(Register Rd, int lsb, int width, Condition cond = C_DFLT) {
  assert(VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7), "unsupported on the cpu");
  int msb = lsb + width - 1;
  assert(lsb >= 0 && lsb < 32, "lsb out of range");
  assert(msb < 32 && msb >= lsb, "width out of range");
  starti;
  media_instr(0b11100, 0b000, cond);
  f(msb, 20, 16), rf(Rd, 12), f(lsb, 11, 7), f(0b1111, 3, 0);
}
1418
1419
//Branch, branch with link, and block data transfer

// LDM/STM-family encoder. w is the writeback bit (merged into bit 21 of
// the decode field); regset is a 16-bit register bitmask (bit i = ri).
void block_imm_instr(int decode, int w, Register Rn, unsigned regset,
                     Condition cond) {
  starti;
  f(cond, 31, 28), f(0b10, 27, 26), f(decode | (w << 1), 25, 20);
  rf(Rn, 16), f(regset, 15, 0);
}
// Load/store multiple in the four addressing variants (DA/IA/DB/IB), each
// under both its increment/decrement name and its stack-oriented alias.
#define INSN(NAME, decode)                                                    \
  void NAME(Register Rn, unsigned regset, bool wb = true, Condition cond = C_DFLT) { \
    block_imm_instr(decode, wb, Rn, regset, cond);                            \
  }
INSN(stmda, 0b000000);
INSN(stmed, 0b000000);

INSN(ldmda, 0b000001);
INSN(ldmfa, 0b000001);

//INSN(stm, 0b001000);
INSN(stmia, 0b001000);
INSN(stmea, 0b001000);

//INSN(ldm, 0b001001);
INSN(ldmia, 0b001001);
INSN(ldmfd, 0b001001);

INSN(stmdb, 0b010000);
INSN(stmfd, 0b010000);

INSN(ldmdb, 0b010001);
INSN(ldmea, 0b010001);

INSN(stmib, 0b011000);
INSN(stmfa, 0b011000);

INSN(ldmib, 0b011001);
INSN(ldmed, 0b011001);
#undef INSN
1457
1458
// Population count of a register mask; defined out of line.
unsigned count_bits(unsigned val);
// Whether regset/adr can be expressed as a single LDM/STM; defined out of line.
bool can_ldst_multiple( unsigned regset, const Address& adr);

//NOTE!! Have repurposed stm and ldm for auto dispatch instructions
// Dispatches to the matching increment/decrement, before/after variant
// from the writeback mode and the sign of the address offset.
// NOTE(review): in post mode with offset == 0 this emits a non-writeback
// DA form rather than IA; presumably can_ldst_multiple only admits
// offsets consistent with that choice - TODO confirm.
#define INSN(NAME, PREFIX)                                                    \
  void NAME(unsigned regset, const Address& adr, Condition cond = C_DFLT) {   \
    assert(can_ldst_multiple(regset, adr), "Can't do anything with this!");   \
    int offset = adr.offset();                                                \
    switch(adr.get_wb_mode()) {                                               \
      case Address::pre:                                                      \
        if(offset > 0) PREFIX##mib(adr.base(), regset, true, cond);           \
        else           PREFIX##mdb(adr.base(), regset, true, cond);           \
        break;                                                                \
      case Address::post:                                                     \
        if(offset > 0) PREFIX##mia(adr.base(), regset, true, cond);           \
        else           PREFIX##mda(adr.base(), regset, offset != 0, cond);    \
        break;                                                                \
      case Address::off:                                                      \
        if(offset > 0)   PREFIX##mib(adr.base(), regset, false, cond);        \
        else if(!offset) PREFIX##mia(adr.base(), regset, false, cond);        \
        else             PREFIX##mdb(adr.base(), regset, false, cond);        \
        break;                                                                \
      default:                                                                \
        ShouldNotReachHere();                                                 \
    }                                                                         \
  }
INSN(ldm, ld);
INSN(stm, st);
#undef INSN
1487
1488
//Made push and pop operate on full descending stacks
// push = STMDB sp!, {regset}; pop = LDMIA sp!, {regset}.
#define INSN(NAME, CNAME)                                                     \
  inline void NAME(unsigned regset, Condition cond = C_DFLT) {                \
    CNAME(r13, regset, true, cond);                                           \
  }
INSN(pop,  ldmia);
INSN(push, stmdb);
#undef INSN

public:

// LDRD/STRD: doubleword load/store via the extra load/store encodings.
// Reuses INSN_INT (defined earlier in this file) for the raw-address,
// reloc and Label overloads; the PREFIX argument is unused by the macro.
#define INSN(NAME, PREFIX, op, op2, a, b, isload)                             \
  void NAME(Register Rt, const Address& adr, Condition cond = C_DFLT) {       \
    load_store_instr(Rt, adr, op, op2, a, b, cond);                           \
  }                                                                           \
  INSN_INT(NAME, op, op2, a, b, isload);

INSN(ldrd, ld, 0b000, 0b1101, 0, 0, 1);
INSN(strd, st, 0b000, 0b1111, 0, 0, 0);
#undef INSN
#undef INSN_INT
1509
1510
// Branches

// For immediate branches:
// The maximum range of a branch is fixed for the aarch32
// architecture. In debug mode we shrink it in order to test
// trampolines, but not so small that branches in the interpreter
// are out of range.
static const unsigned long branch_range = NOT_DEBUG(32 * M) DEBUG_ONLY(2 * M);

// True when target is within branch_range of the branch site.
static bool reachable_from_branch_at(address branch, address target) {
  return uabs(target - branch) < branch_range;
}

// B/BL immediate encoder: 24-bit signed word offset relative to pc + 8.
void branch_imm_instr(int decode, address dest, Condition cond) {
  starti;
  // Correct PC for as it will be when executing this instruction
  int offset = (dest - (pc() + 8)) >> 2;
  assert(reachable_from_branch_at(pc(), dest), "branch target unreachable");
  f(cond, 31, 28), f(decode, 27, 24), sf(offset, 23, 0);
}
1529
1530
// BX/BLX register-indirect branch encoder (SBO bits 19:8).
void branch_reg_instr(int decode, Register Rm, Condition cond) {
  starti;
  f(cond, 31, 28), f(0b00010010, 27, 20);
  f(0b111111111111, 19, 8), f(decode, 7, 4), rf(Rm, 0);
}

// b/bl in four forms: register (BX/BLX), absolute address, Label (patched
// via wrap_label), and Address (with relocation).
#define INSN(NAME, decode_imm, decode_reg)                                    \
  void NAME(Register Rm, Condition cond = C_DFLT) {                           \
    branch_reg_instr(decode_reg, Rm, cond);                                   \
  }                                                                           \
  void NAME(address dest, Condition cond = C_DFLT) {                          \
    branch_imm_instr(decode_imm, dest, cond);                                 \
  }                                                                           \
  void NAME(Label &L, Condition cond = C_DFLT) {                              \
    wrap_label(L, cond, &Assembler::NAME);                                    \
  }                                                                           \
  void NAME(const Address &dest, Condition cond = C_DFLT) {                   \
    code_section()->relocate(pc(), dest.rspec());                             \
    NAME(dest.target(), cond);                                                \
  }
//TODO assert type of address
INSN(b,  0b1010, 0b0001); // B & BX
INSN(bl, 0b1011, 0b0011); // BL & BLX
#undef INSN
1554
1555
1556
//TODO Coprocessor instructions, and Supervisor Call
1557
1558
1559
// Unconditional Instructions
// Barrier option encodings (bits 3:0 of DMB/DSB).
enum barrier {OSHST = 0b0010, OSH,
              NSHST = 0b0110, NSH,
              ISHST = 0b1010, ISH,
              ST    = 0b1110, SY};

// Barrier/CLREX encoder in the unconditional encoding space.
void sync_instr(int decode, enum barrier option) {
  starti;
  f(0b11110, 31, 27), f(0b1010111, 26, 20), f(0b111111110000, 19, 8);
  f(decode, 7, 4), f(option, 3, 0);
}
// Clear the local exclusive monitor.
void clrex() {
  sync_instr(0b0001, SY);
}
// Data synchronization barrier.
void dsb(enum barrier option) {
  sync_instr(0b0100, option);
}
// Data memory barrier.
void dmb(enum barrier option) {
  sync_instr(0b0101, option);
}
void bkpt();  // no-argument overload; defined out of line
// Instruction synchronization barrier.
void isb() {
  sync_instr(0b0110, SY);
}
1583
1584
// And the relevant instructions for ARMv6.

// MCR<c> <coproc>, <opc1>, <Rt>, <CRn>, <CRm>{, <opc2>}
// Move core register Rt to a coprocessor register.
void mcr(int cpc_dex, int opc1, Register Rt, int cpc_reg_dex1,
         int cpc_reg_dex2, int opc2, Condition cond = C_DFLT) {
  starti;
  f(cond, 31, 28), f(0b1110, 27, 24), f(opc1, 23, 21), f(0, 20);
  f(cpc_reg_dex1, 19, 16), rf(Rt, 12), f(cpc_dex, 11, 8);
  f(opc2, 7, 5), f(1, 4), f(cpc_reg_dex2, 3, 0);
}

// These instructions do not read the value of the register passed,
// can be any. Chosen r0.
// CP15 c7 cache/barrier operations - the pre-ARMv7 equivalents of
// DMB/DSB/ISB.
void cp15dmb(Condition cond = C_DFLT) {
  mcr(15, 0, r0, 7, 10, 5, cond);
}

void cp15dsb(Condition cond = C_DFLT) {
  mcr(15, 0, r0, 7, 10, 4, cond);
}

void cp15isb(Condition cond = C_DFLT) {
  mcr(15, 0, r0, 7, 5, 4, cond);
}
1608
1609
// Barrier flavours used by the runtime's membar abstraction; all map to
// Inner Shareable domain barriers.
enum Membar_mask_bits {
  // We can use ISH for a barrier because the ARM ARM says "This
  // architecture assumes that all Processing Elements that use the
  // same operating system or hypervisor are in the same Inner
  // Shareable shareability domain."
  StoreStore = ISHST,
  LoadStore  = ISH, //ISHLD, Changed to
  LoadLoad   = ISH, //ISHLD,
  StoreLoad  = ISH,
  AnyAny     = ISH
};

// MRS: read the status register into Rd.
void mrs(Register Rd, Condition cond = C_DFLT) {
  starti;
  f(cond, 31, 28), f(0b00010, 27, 23), f(0, 22), f(0b00, 21, 20), f(0b1111, 19, 16);
  rf(Rd, 12), f(0b000000000000, 11, 0);
}

// MSR (register): write the flags (nzcvq, bit 19) and/or GE (g, bit 18)
// fields of the status register from Rn.
void msr(Register Rn, bool nzcvq = true, bool g = true, Condition cond = C_DFLT) {
  starti;
  f(cond, 31, 28), f(0b00010, 27, 23), f(0, 22), f(0b10, 21, 20);
  f(nzcvq ? 1 : 0, 19), f(g ? 1 : 0, 18), f(0b00, 17, 16);
  f(0b111100000000, 15, 4), rf(Rn, 0);
}
1633
1634
// Floating point operations

// FPSCR condition masks as they appear after vmrs transfers the flags to
// a core register (NZCV in bits 31:28).
enum fpscr_cond { FP_EQ = 0b0110 << 28,
                  FP_LT = 0b1000 << 28,
                  FP_GT = 0b0010 << 28,
                  FP_UN = 0b0011 << 28,
                  FP_MASK = 0b1111 << 28 };

// Common VFP data-processing prefix; sz (bit 8) selects double precision.
// Does not call starti - callers do.
void fp_instr_base(bool is64bit, Condition cond) {
  f(cond, 31, 28), f(0b1110, 27, 24), f(0b101, 11, 9), f(is64bit, 8), f(0, 4);
}

// Encode an S/D register number into its split 4+1-bit field: for single
// registers the upper 4 bits go at 'base' and the LSB at 'bit'; for
// doubles the lower 4 bits go at 'base' and bit 4 at 'bit'.
void fp_rencode(FloatRegister reg, bool is64bit, int base, int bit) {
  int reg_val = reg->encoding_nocheck();
  if(!is64bit) {
    f( reg_val >> 1, base + 3, base);
    f( reg_val & 1, bit);
  } else {
    f( reg_val & 0xf, base + 3, base);
    f( reg_val >> 4, bit);
  }
}

// Three-operand VFP encoder: decode in bits 23:20, op in bit 6.
void fp_instr(int decode, int op, bool is64bit, FloatRegister Rd, FloatRegister Rn,
              FloatRegister Rm, Condition cond) {
  fp_instr_base(is64bit, cond);
  f(decode, 23, 20), f(op, 6);
  // Register encoding is a bit involved
  // double register passed (see 'd0'-'dN' encoding), not reencode it's number
  fp_rencode(Rn, false, 16, 7);
  fp_rencode(Rd, false, 12, 22);
  fp_rencode(Rm, false, 0, 5);
}
1667
1668
// Three-operand VFP arithmetic, F32 and F64 variants.
#define INSN(NAME, decode, op, is64bit)                                       \
  void NAME(FloatRegister Rd, FloatRegister Rn, FloatRegister Rm,             \
            Condition cond = C_DFLT) {                                        \
    starti;                                                                   \
    fp_instr(decode, op, is64bit, Rd, Rn, Rm, cond);                          \
  }
INSN(vmla_f32, 0b0000, 0, 0);
INSN(vmla_f64, 0b0000, 0, 1);
INSN(vmls_f32, 0b0000, 1, 0);
INSN(vmls_f64, 0b0000, 1, 1);

INSN(vnmla_f32, 0b0001, 1, 0);
INSN(vnmla_f64, 0b0001, 1, 1);
INSN(vnmls_f32, 0b0001, 0, 0);
INSN(vnmls_f64, 0b0001, 0, 1);
INSN(vnmul_f32, 0b0010, 1, 0);
INSN(vnmul_f64, 0b0010, 1, 1);
INSN(vmul_f32,  0b0010, 0, 0);
INSN(vmul_f64,  0b0010, 0, 1);

INSN(vadd_f32, 0b0011, 0, 0);
INSN(vadd_f64, 0b0011, 0, 1);
INSN(vsub_f32, 0b0011, 1, 0);
INSN(vsub_f64, 0b0011, 1, 1);

INSN(vdiv_f32, 0b1000, 0, 0);
INSN(vdiv_f64, 0b1000, 0, 1);

INSN(vfnma_f32, 0b1001, 1, 0);
INSN(vfnma_f64, 0b1001, 1, 1);
INSN(vfnms_f32, 0b1001, 0, 0);
INSN(vfnms_f64, 0b1001, 0, 1);

INSN(vfma_f32, 0b1010, 0, 0);
INSN(vfma_f64, 0b1010, 0, 1);
INSN(vfms_f32, 0b1010, 1, 0);
INSN(vfms_f64, 0b1010, 1, 1);
#undef INSN


// Load an encodable VFP immediate (or zero) into Rd; defined out of line.
void vmov_imm(FloatRegister Rd, unsigned imm, bool is64bit, Condition cond);
void vmov_imm_zero(FloatRegister Rd, bool is64bit, Condition cond);

// 8-bit VFP immediate encoding of imm_f; defined out of line.
unsigned encode_float_fp_imm(float imm_f);

void vmov_f32(FloatRegister Rd, float imm, Condition cond = C_DFLT) {
  vmov_imm(Rd, encode_float_fp_imm(imm), false, cond);
}

unsigned encode_double_fp_imm(double imm_f);

void vmov_f64(FloatRegister Rd, double imm, Condition cond = C_DFLT) {
  // +0.0 has no VFP immediate encoding, so it takes the dedicated zero
  // path; the signbit check keeps -0.0 on the normal encoding path.
  bool positive_zero = (imm == 0.0) && !signbit(imm);
  if(positive_zero) vmov_imm_zero(Rd, true, cond);
  else vmov_imm(Rd, encode_double_fp_imm(imm), true, cond);
}
1724
1725
1726
// Two-operand VFP ops: register copy, absolute value, negate, square root
// (F32 and F64 variants).
#define INSN(NAME, decode, op, is64bit)                                       \
  void NAME(FloatRegister Rd, FloatRegister Rm, Condition cond = C_DFLT) {    \
    starti;                                                                   \
    fp_instr_base(is64bit, cond);                                             \
    f(0b1011, 23, 20), f(decode, 19, 16), f(op, 7, 6), f(0b00, 5, 4);         \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Rd, false, 12, 22);                                            \
    fp_rencode(Rm, false, 0, 5);                                              \
  }
INSN(vmov_f32,  0b0000, 0b01, 0);
INSN(vmov_f64,  0b0000, 0b01, 1);
INSN(vabs_f32,  0b0000, 0b11, 0);
INSN(vabs_f64,  0b0000, 0b11, 1);
INSN(vneg_f32,  0b0001, 0b01, 0);
INSN(vneg_f64,  0b0001, 0b01, 1);
INSN(vsqrt_f32, 0b0001, 0b11, 0);
INSN(vsqrt_f64, 0b0001, 0b11, 1);
#undef INSN
1744
1745
//ARM -> FP, FP -> ARM
// NOTE - Have only implemented the double precision variant as only operating on
// double registers - can still be used to copy single precision
// VMOV between a D register and a pair of core registers; op (bit 20) is
// the direction: 0 = core -> FP, 1 = FP -> core.
void vmov64_instr_base(FloatRegister Rm, Register Rt, Register Rt2, int op,
                       Condition cond) {
  starti;
  f(cond, 31, 28), f(0b1100010, 27, 21), f(op, 20);
  rf(Rt2, 16), rf(Rt, 12), f(0b101100, 11, 6), f(1, 4);
  // double register passed (see 'd0'-'dN' encoding), not reencode it's number
  fp_rencode(Rm, false, 0, 5);
}

// Dm := Rt2:Rt
void vmov_f64(FloatRegister Rm, Register Rt, Register Rt2, Condition cond = C_DFLT) {
  vmov64_instr_base(Rm, Rt, Rt2, 0, cond);
}
// Rt2:Rt := Dm
void vmov_f64(Register Rt, Register Rt2, FloatRegister Rm, Condition cond = C_DFLT) {
  vmov64_instr_base(Rm, Rt, Rt2, 1, cond);
}

// Sn := Rt
void vmov_f32(FloatRegister Rn, Register Rt, Condition cond = C_DFLT) {
  starti;
  fp_instr_base(false, cond);
  f(0b000, 23, 21), f(0, 20);
  rf(Rt, 12), f(0b101000010000, 11, 0);
  // double register passed (see 'd0'-'dN' encoding), not reencode it's number
  fp_rencode(Rn, false, 16, 7);
}
// Rt := Sn
void vmov_f32(Register Rt, FloatRegister Rn, Condition cond = C_DFLT) {
  starti;
  fp_instr_base(false, cond);
  f(0b000, 23, 21), f(1, 20);
  rf(Rt, 12), f(0b101000010000, 11, 0);
  // double register passed (see 'd0'-'dN' encoding), not reencode it's number
  fp_rencode(Rn, false, 16, 7);
}
1780
1781
// Floating-point comparison
// Compare against zero (immediate form, imm must be 0) or another VFP
// register. E (bit 7) selects the signalling variant (vcmpe).
#define INSN(NAME, E, is64bit)                                                \
  void NAME(FloatRegister Rd, int imm, Condition cond = C_DFLT) {             \
    assert(0 == imm, "vector compare can only be with another vector or zero"); \
    starti;                                                                   \
    fp_instr_base(is64bit, cond);                                             \
    f(0b10110101, 23, 16), f(E, 7), f(0b1000000, 6, 0);                       \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Rd, false, 12, 22);                                            \
  }                                                                           \
  void NAME(FloatRegister Vd, FloatRegister Vm, Condition cond = C_DFLT) {    \
    starti;                                                                   \
    fp_instr_base(is64bit, cond);                                             \
    f(0b10110100, 23, 16), f(E, 7), f(1, 6), f(0, 4);                         \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Vd, false, 12, 22), fp_rencode(Vm, false, 0, 5);               \
  }
INSN(vcmpe_f64, 1, 1);
INSN(vcmpe_f32, 1, 0);
INSN( vcmp_f64, 0, 1);
INSN( vcmp_f32, 0, 0);
#undef INSN

//Move FPSCR to ARM register
void vmrs(Register Rt, Condition cond = C_DFLT) {
  starti;
  f(cond, 31, 28), f(0b111011110001, 27, 16), rf(Rt, 12), f(0b101000010000, 11, 0);
}

//Move ARM register to FPSCR
void vmsr(Register Rt, Condition cond = C_DFLT) {
  starti;
  f(cond, 31, 28), f(0b111011100001, 27, 16), rf(Rt, 12), f(0b101000010000, 11, 0);
}
1815
1816
// TODO These instructions use round towards zero mode. It is possible
// for the mode to be taken from the FPSCR however it doesn't do it currently
// VCVT: conversions between 32-bit integers and F32/F64, and between F32
// and F64. Naming is vcvt_<to>_<from>.
#define INSN(NAME, decode2, b19, op, is64bitRd, is64bitRm, sz)                \
  void NAME(FloatRegister Rd, FloatRegister Rm, Condition cond = C_DFLT) {    \
    starti;                                                                   \
    fp_instr_base(sz, cond);                                                  \
    f(0b1011, 23, 20), f(b19, 19), f(decode2, 18, 16), f(op, 7), f(0b100, 6, 4); \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Rd, false, 12, 22);                                            \
    fp_rencode(Rm, false, 0, 5);                                              \
  }
INSN(vcvt_s32_f32, 0b101, 1, 1, 0, 0, 0);
INSN(vcvt_s32_f64, 0b101, 1, 1, 0, 1, 1);
INSN(vcvt_u32_f32, 0b100, 1, 1, 0, 0, 0);
INSN(vcvt_u32_f64, 0b100, 1, 1, 0, 1, 1);

INSN(vcvt_f64_s32, 0b000, 1, 1, 1, 0, 1);
INSN(vcvt_f64_u32, 0b000, 1, 0, 1, 0, 1);
INSN(vcvt_f32_s32, 0b000, 1, 1, 0, 0, 0);
INSN(vcvt_f32_u32, 0b000, 1, 0, 0, 0, 0);

INSN(vcvt_f32_f64, 0b111, 0, 1, 0, 1, 1);
INSN(vcvt_f64_f32, 0b111, 0, 1, 1, 0, 0);
#undef INSN
1840
1841
//Vector load/store
private:
  // Common VLDR/VSTR encoder; defined out of line.
  void fp_ldst_instr(int decode, bool is64bit, const Address& adr, Condition cond);
public:

// VLDR/VSTR with Address, pc-relative literal, relocated-address and
// Label overloads.
#define INSN(NAME, decode, is64bit)                                           \
  void NAME(FloatRegister Vd, const Address &adr, Condition cond = C_DFLT) {  \
    starti;                                                                   \
    fp_ldst_instr(decode, is64bit, adr, cond);                                \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Vd, false, 12, 22);                                            \
  }                                                                           \
  void NAME(FloatRegister Vd, address dest, Condition cond = C_DFLT) {        \
    long offset = dest - pc();                                                \
    NAME(Vd, Address(r15_pc, offset), cond);                                  \
  }                                                                           \
  void NAME(FloatRegister Vd, address dest, relocInfo::relocType rtype,       \
            Condition cond = C_DFLT) {                                        \
    guarantee(rtype == relocInfo::internal_word_type,                         \
              "only internal_word_type relocs make sense here");              \
    NAME(Vd, InternalAddress(dest), cond);                                    \
  }                                                                           \
  void NAME(FloatRegister Vd, Label &L, Condition cond = C_DFLT) {            \
    wrap_label(Vd, L, cond, &Assembler::NAME);                                \
  }
INSN(vstr_f64, 0b10000, 1);
INSN(vstr_f32, 0b10000, 0);
INSN(vldr_f64, 0b10001, 1);
INSN(vldr_f32, 0b10001, 0);
#undef INSN
1871
1872
private:
  // Addressing variants for VLDM/VSTM: increment-after with and without
  // writeback, and decrement-before (always writeback).
  enum fp_mode { ia_wb, ia, db_wb };
  void fp_ldst_mul(Register Rn, int regset, bool load, bool is64bit, enum fp_mode mode, Condition cond);
public:
// vldm/vstm in ia (optional writeback) and db (writeback) forms, for S
// (_f32) and D (_f64) register sets.
#define INSN(NAME, EXT, is64bit, load)                                        \
  inline void NAME##ia##EXT(Register Rn, unsigned regset, bool wb = true,     \
                            Condition cond = C_DFLT) {                        \
    fp_ldst_mul(Rn, regset, load, is64bit,                                    \
                (enum fp_mode)( ia_wb + ( wb?0:1 )), cond);                   \
  }                                                                           \
  inline void NAME##db##EXT(Register Rn, unsigned regset, Condition cond = C_DFLT) { \
    fp_ldst_mul(Rn, regset, load, is64bit, db_wb, cond);                      \
  }
INSN(vldm, _f32, 0, 1);
INSN(vldm, _f64, 1, 1);
INSN(vstm, _f32, 0, 0);
INSN(vstm, _f64, 1, 0);
#undef INSN
1890
1891
public:
// Cache preload hints: PLD (r = 1) and PLDW (r = 0) with a signed 12-bit
// immediate offset from base register Rb. Emitted unconditionally
// (condition field is 0b1111).
#define INSN(NAME, r)                                 \
  inline void NAME(Register Rb, int imm) {            \
    starti;                                           \
    f(0b1111, 31, 28);                                \
    f(0b0101, 27, 24), f(0b01, 21, 20);               \
    f(0b1111, 15, 12);                                \
    /* U bit: add (1) or subtract (0) the offset */   \
    f(imm >= 0 ? 1 : 0, 23);                          \
    f(r, 22);                                         \
    rf(Rb, 16);                                       \
    /* magnitude of the offset */                     \
    f(imm >= 0 ? imm : -imm, 11, 0);                  \
  }

  INSN(pld,  1);
  INSN(pldw, 0);
#undef INSN
1906
1907
#undef ZERO_ADDR_REG
1908
#undef ONES_ADDR_REG
1909
1910
/* SIMD extensions
1911
*
1912
* We just use FloatRegister in the following. They are exactly the same
1913
* as SIMD registers.
1914
*/
1915
public:
// Alignment hint encoded into SIMD structure load instructions.
enum SIMD_Align {
  ALIGN_STD = 0b00, ALIGN_64 = 0b01, ALIGN_128 = 0b10, ALIGN_256 = 0b11
};
private:
// Common encoder for VLD1 (multiple single elements); 'type' selects the
// 1/2/3/4-register form, 'size' the element size, 'xfer_size' the number of
// D registers transferred.
void simd_ld(FloatRegister, unsigned type, unsigned size, unsigned xfer_size,
             const Address &addr, enum SIMD_Align align);
public:
// VLD1.<size> loading 1 to 4 consecutive D registers; consecutiveness of the
// register list is asserted.
#define INSN(NAME, size)                                                                 \
  inline void NAME(FloatRegister Dd, const Address &addr, enum SIMD_Align align) {       \
    simd_ld(Dd, 0b0111, size, 1, addr, align);                                           \
  }                                                                                      \
  inline void NAME(FloatRegister Dd, FloatRegister Dd1, const Address &addr,             \
                   enum SIMD_Align align) {                                              \
    assert(Dd->successor(FloatRegisterImpl::DOUBLE) == Dd1, "Must be consecutive");      \
    simd_ld(Dd, 0b1010, size, 2, addr, align);                                           \
  }                                                                                      \
  inline void NAME(FloatRegister Dd, FloatRegister Dd1, FloatRegister Dd2,               \
                   const Address &addr, enum SIMD_Align align) {                         \
    assert(Dd->successor(FloatRegisterImpl::DOUBLE) == Dd1, "Must be consecutive");      \
    assert(Dd1->successor(FloatRegisterImpl::DOUBLE) == Dd2, "Must be consecutive");     \
    simd_ld(Dd, 0b0110, size, 3, addr, align);                                           \
  }                                                                                      \
  inline void NAME(FloatRegister Dd, FloatRegister Dd1, FloatRegister Dd2,               \
                   FloatRegister Dd3, const Address &addr, enum SIMD_Align align) {      \
    assert(Dd->successor(FloatRegisterImpl::DOUBLE) == Dd1, "Must be consecutive");      \
    assert(Dd1->successor(FloatRegisterImpl::DOUBLE) == Dd2, "Must be consecutive");     \
    assert(Dd2->successor(FloatRegisterImpl::DOUBLE) == Dd3, "Must be consecutive");     \
    simd_ld(Dd, 0b0010, size, 4, addr, align);                                           \
  }

  INSN(vld1_8,  0b00);
  INSN(vld1_16, 0b01);
  INSN(vld1_32, 0b10);
  INSN(vld1_64, 0b11);
#undef INSN
1950
1951
private:
// Common encoder for VMOV between a core register and a D-register lane.
// 'bit20' selects the direction: 0 moves core -> SIMD, 1 moves SIMD -> core
// (matching the two expansions below).
void simd_vmov(FloatRegister Dd, unsigned index, Register Rt, bool advsimd,
               unsigned index_bits, unsigned bit20, unsigned opc, Condition cond);
public:
// Core register to SIMD lane: VMOV.<size> Dd[index], Rt.
#define INSN(NAME, advsimd, opc, index_bits)                      \
  inline void NAME(FloatRegister Rd, unsigned index, Register Rt, \
                   Condition cond = Assembler::AL) {              \
    simd_vmov(Rd, index, Rt, advsimd, index_bits, 0, opc, cond);  \
  }

  INSN(vmov_8,  true,  0b1000, 2);
  INSN(vmov_16, true,  0b0001, 1);
  INSN(vmov_32, false, 0b0000, 0);
#undef INSN
// SIMD lane to core register: VMOV.<size> Rt, Dd[index]; the 8- and 16-bit
// forms come in sign-extending (s) and zero-extending (u) variants.
#define INSN(NAME, advsimd, opc, index_bits)                      \
  inline void NAME(Register Rt, FloatRegister Rd, unsigned index, \
                   Condition cond = Assembler::AL) {              \
    simd_vmov(Rd, index, Rt, advsimd, index_bits, 1, opc, cond);  \
  }

  INSN(vmov_8s,  true,  0b01000, 3);
  INSN(vmov_16s, true,  0b00001, 2);
  INSN(vmov_8u,  true,  0b11000, 3);
  INSN(vmov_16u, true,  0b10001, 2);
  INSN(vmov_32,  false, 0b00000, 1);
#undef INSN
1975
1976
private:
// Encoder for VEOR (bitwise exclusive-or of SIMD registers); 'q' selects the
// 128-bit (Q-register) form.
void simd_eor(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm, unsigned q);
public:
#define INSN(NAME, q)                                                        \
  inline void NAME(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm) {   \
    simd_eor(Dd, Dn, Dm, q);                                                 \
  }

  INSN(veor_64,  0);
  INSN(veor_128, 1);
#undef INSN
1986
1987
private:
// Common encoder for the SIMD multiply family (VMUL/VMULL and the
// polynomial variants).
void simd_vmul(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm,
               unsigned bit24, unsigned bit9, unsigned size, unsigned mul, unsigned bit6);
public:
// Naming: vmul_<width>_<elem> multiplies same-size vectors; vmull_* are the
// widening ("long") forms; vmulp_*/vmullp_* are the polynomial multiplies.
#define INSN(NAME, bit24, bit9, size, mul, bit6)                             \
  inline void NAME(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm) {   \
    simd_vmul(Dd, Dn, Dm, bit24, bit9, size, mul, bit6);                     \
  }

  INSN(vmul_64_8,    0, 0, 0b00, 1, 0);
  INSN(vmul_64_16,   0, 0, 0b01, 1, 0);
  INSN(vmul_64_32,   0, 0, 0b10, 1, 0);
  INSN(vmulp_64_8,   1, 0, 0b00, 1, 0);
  INSN(vmul_128_8,   0, 0, 0b00, 1, 1);
  INSN(vmul_128_16,  0, 0, 0b01, 1, 1);
  INSN(vmul_128_32,  0, 0, 0b10, 1, 1);
  INSN(vmulp_128_8,  1, 0, 0b00, 1, 1);
  INSN(vmull_8s,     0, 0, 0b00, 0, 0);
  INSN(vmull_16s,    0, 0, 0b01, 0, 0);
  INSN(vmull_32s,    0, 0, 0b10, 0, 0);
  INSN(vmull_8u,     1, 0, 0b00, 0, 0);
  INSN(vmull_16u,    1, 0, 0b01, 0, 0);
  INSN(vmull_32u,    1, 0, 0b10, 0, 0);
  INSN(vmullp_8,     0, 1, 0b00, 0, 0);
#undef INSN
2011
2012
private:
// Encoder for VUZP (de-interleave two vectors); 'q' selects the 128-bit form.
void simd_vuzp(FloatRegister Dd, FloatRegister Dm, unsigned size, unsigned q);
public:
// Naming: vuzp_<width>_<elem> unzips <elem>-bit elements of <width>-bit
// vectors.
#define INSN(NAME, size, q)                              \
  inline void NAME(FloatRegister Dd, FloatRegister Dm) { \
    simd_vuzp(Dd, Dm, size, q);                          \
  }

  INSN(vuzp_64_8,   0b00, 0);
  INSN(vuzp_64_16,  0b01, 0);
  INSN(vuzp_64_32,  0b10, 0);
  INSN(vuzp_128_8,  0b00, 1);
  INSN(vuzp_128_16, 0b01, 1);
  INSN(vuzp_128_32, 0b10, 1);
#undef INSN
2026
2027
private:
// Common encoder for immediate SIMD shifts (VSHL and the widening VSHLL).
void simd_vshl(FloatRegister Dd, FloatRegister Dm, unsigned imm, unsigned size,
               unsigned q, unsigned bit24, unsigned encode);
public:
// 'checkDd' is set for the widening (vshll) forms, whose destination takes a
// register pair.
// NOTE(review): the assert tests bit 1 of the encoding; this assumes
// FloatRegister encodings count single-precision units, making it an
// even-numbered-D-register check despite the "Odd register" message --
// confirm against the FloatRegister encoding scheme in register_aarch32.
#define INSN(NAME, size, q, bit24, encode, checkDd)                  \
  inline void NAME(FloatRegister Dd, FloatRegister Dm, unsigned imm) { \
    assert(!checkDd || (Dd->encoding() & 2) == 0, "Odd register");     \
    simd_vshl(Dd, Dm, imm, size, q, bit24, encode);                    \
  }

  INSN(vshl_64_8,   3, 0, 0, 0b0101, false);
  INSN(vshl_64_16,  4, 0, 0, 0b0101, false);
  INSN(vshl_64_32,  5, 0, 0, 0b0101, false);
  INSN(vshl_64_64,  6, 0, 0, 0b0101, false);
  INSN(vshl_128_8,  3, 1, 0, 0b0101, false);
  INSN(vshl_128_16, 4, 1, 0, 0b0101, false);
  INSN(vshl_128_32, 5, 1, 0, 0b0101, false);
  INSN(vshl_128_64, 6, 1, 0, 0b0101, false);
  INSN(vshll_8s,    3, 1, 0, 0b1010, true);
  INSN(vshll_8u,    3, 0, 1, 0b1010, true);
  INSN(vshll_16s,   4, 0, 0, 0b1010, true);
  INSN(vshll_16u,   4, 0, 1, 0b1010, true);
  INSN(vshll_32s,   5, 0, 0, 0b1010, true);
  INSN(vshll_32u,   5, 0, 1, 0b1010, true);
#undef INSN
2051
2052
private:
// Common encoder for the VREV16/VREV32/VREV64 element-reversal instructions.
void simd_rev(FloatRegister Dd, FloatRegister Dm, unsigned q, unsigned size,
              unsigned op);
public:
// Naming: vrev<region>_<width>_<elem> reverses the order of <elem>-bit
// elements within each <region>-bit group of a <width>-bit vector.
#define INSN(NAME, q, size, op)                          \
  inline void NAME(FloatRegister Dd, FloatRegister Dm) { \
    simd_rev(Dd, Dm, q, size, op);                       \
  }

  INSN(vrev16_64_8,   0, 0, 2);
  INSN(vrev16_128_8,  1, 0, 2);
  INSN(vrev32_64_8,   0, 0, 1);
  INSN(vrev32_128_8,  1, 0, 1);
  INSN(vrev32_64_16,  0, 1, 1);
  INSN(vrev32_128_16, 1, 1, 1);
  INSN(vrev64_64_8,   0, 0, 0);
  INSN(vrev64_128_8,  1, 0, 0);
  INSN(vrev64_64_16,  0, 1, 0);
  INSN(vrev64_128_16, 1, 1, 0);
  INSN(vrev64_64_32,  0, 2, 0);
  INSN(vrev64_128_32, 1, 2, 0);
#undef INSN
2073
2074
private:
// Encoder for the ARMv8 CRC32 instructions; 'size' selects the input width.
void v8_crc32(Register Rd, Register Rn, Register Rm, unsigned size, Condition cond);
public:
// crc32b/crc32h/crc32w accumulate a CRC over a byte/halfword/word operand.
#define INSN(NAME, size)                                                              \
  inline void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) {  \
    v8_crc32(Rd, Rn, Rm, size, cond);                                                 \
  }

  INSN(crc32b, 0);
  INSN(crc32h, 1);
  INSN(crc32w, 2);
#undef INSN
2085
2086
// Construct an assembler emitting into the given code buffer.
Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
2087
2088
// Not used in this port; present only to satisfy the AbstractAssembler
// interface. Any call is a programming error.
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                              Register tmp,
                                              int offset) {
  ShouldNotCallThis();
  return RegisterOrConstant();  // unreachable; silences missing-return warnings
}
2094
2095
// Stack overflow checking: probes the stack at the given offset below sp
// (implementation in assembler_aarch32.cpp).
virtual void bang_stack_with_offset(int offset);

// Immediate values checks and transformations
// encode_imm12/decode_imm12 convert between a plain integer and the ARM
// data-processing immediate form; presumably the 8-bit-value-with-rotation
// "modified immediate" encoding -- see the .cpp for the exact scheme.
static uint32_t encode_imm12(int imm);
static int decode_imm12(uint32_t imm12);
static bool is_valid_for_imm12(int imm);
2103
2104
static bool is_valid_for_offset_imm(int imm, int nbits) {
2105
return uabs(imm) < (1u << nbits);
2106
}
2107
2108
// Encodability checks: whether the value can be represented as the
// corresponding instruction-immediate form. Overloads cover the integral
// types used at the call sites.
static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
static bool operand_valid_for_add_sub_immediate(int imm);
static bool operand_valid_for_add_sub_immediate(unsigned imm);
static bool operand_valid_for_add_sub_immediate(unsigned long imm);
static bool operand_valid_for_add_sub_immediate(jlong imm);
// Floating-point immediates -- presumably the VFP VMOV immediate form;
// confirm in the .cpp implementation.
static bool operand_valid_for_float_immediate(float imm);
static bool operand_valid_for_double_immediate(double imm);

// Emits a raw 64-bit data value with the given relocation into the buffer.
void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
2118
2119
// useful to revert back the effect of post/pre addressing modifications
// applied to the base register
void compensate_addr_offset(const Address &adr, Condition cond) {
  // Forward to the register/shift form using the components of 'adr'.
  compensate_addr_offset(adr.base(), adr.index(), adr.shift(), adr.op() == Address::ADD, cond);
}
2124
// Undoes the base-register update of a pre/post-indexed access by applying
// the opposite operation (sub for add and vice versa) with a "backwards"
// shift.
// NOTE(review): the back-shift uses the inverse shift kind
// (LSL/LSR -> ASR, ASR -> LSL, ROR n -> ROR 32-n) rather than repeating the
// original shift; confirm against the call sites that this matches how the
// offset was originally applied.
void compensate_addr_offset(Register Rd, Register Roff, shift_op shift, bool isAdd, Condition cond) {
  shift_op shift_back;

  if (shift.is_register()) {
    // Shift amount held in a register.
    switch (shift.kind()) {
    case shift_op::LSL:
    case shift_op::LSR:
      shift_back = asr(shift.reg());
      break;
    case shift_op::ASR:
      shift_back = lsl(shift.reg());
      break;
    case shift_op::ROR:
      Unimplemented(); // need a temp register here
      break;
    default:
      ShouldNotReachHere();
    }
  } else {
    // Immediate shift amount.
    switch (shift.kind()) {
    case shift_op::LSL:
    case shift_op::LSR:
      shift_back = asr(shift.shift());
      break;
    case shift_op::ASR:
      shift_back = lsl(shift.shift());
      break;
    case shift_op::ROR:
      // Rotating right by 32-n undoes a rotate right by n.
      shift_back = ror(32-shift.shift());
      break;
    default:
      ShouldNotReachHere();
    }
  }
  // Apply the opposite arithmetic operation to restore the base register.
  if (isAdd)
    sub(Rd, Rd, Roff, shift_back, cond);
  else
    add(Rd, Rd, Roff, shift_back, cond);
}
2163
};
2164
2165
// Combine membar mask bits with '|' while preserving the enum type.
inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
                                             Assembler::Membar_mask_bits b) {
  const unsigned combined = static_cast<unsigned>(a) | static_cast<unsigned>(b);
  return static_cast<Assembler::Membar_mask_bits>(combined);
}
2169
2170
// Emission is driven by scope exit: the Instruction_aarch32 helper (created
// by the 'starti' macro used above) accumulates instruction fields, and this
// destructor flushes the encoded word into the assembler's buffer.
Instruction_aarch32::~Instruction_aarch32() {
  assem->emit();
}
2173
2174
#undef starti
2175
2176
// Invert a condition
2177
inline const Assembler::Condition operator~(const Assembler::Condition cond) {
2178
return Assembler::Condition(int(cond) ^ 1);
2179
}
2180
2181
class BiasedLockingCounters;
2182
2183
extern "C" void das(uint64_t start, int len);
2184
2185
#endif // CPU_AARCH32_VM_ASSEMBLER_AARCH32_HPP
2186
2187