Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/assembler_aarch32.cpp
32285 views
1
/*
2
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
4
* Copyright (c) 2015, Linaro Ltd. All rights reserved.
5
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE
6
* HEADER.
7
*
8
* This code is free software; you can redistribute it and/or modify it
9
* under the terms of the GNU General Public License version 2 only, as
10
* published by the Free Software Foundation.
11
*
12
* This code is distributed in the hope that it will be useful, but WITHOUT
13
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15
* version 2 for more details (a copy is included in the LICENSE file that
16
* accompanied this code).
17
*
18
* You should have received a copy of the GNU General Public License version
19
* 2 along with this work; if not, write to the Free Software Foundation,
20
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21
*
22
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
23
* or visit www.oracle.com if you need additional information or have any
24
* questions.
25
*
26
*/
27
28
#include <stdio.h>
29
#include <sys/types.h>
30
31
#include "precompiled.hpp"
32
#include "asm/assembler.hpp"
33
#include "asm/assembler.inline.hpp"
34
#include "compiler/disassembler.hpp"
35
#include "interpreter/interpreter.hpp"
36
#include "memory/resourceArea.hpp"
37
#include "runtime/interfaceSupport.hpp"
38
#include "runtime/sharedRuntime.hpp"
39
#include "register_aarch32.hpp"
40
#include "vm_version_aarch32.hpp"
41
42
extern "C" void entry(CodeBuffer *cb);
43
44
#define __ _masm.
45
#ifdef PRODUCT
46
#define BLOCK_COMMENT(str) /* nothing */
47
#else
48
#define BLOCK_COMMENT(str) block_comment(str)
49
#endif
50
51
#define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":")
52
53
void entry(CodeBuffer *cb) {
54
55
// {
56
// for (int i = 0; i < 256; i+=16)
57
// {
58
// printf("\"%20.20g\", ", unpack(i));
59
// printf("\"%20.20g\", ", unpack(i+1));
60
// }
61
// printf("\n");
62
// }
63
64
#ifdef ASSERT
65
Assembler _masm(cb);
66
address entry = __ pc();
67
68
// Smoke test for assembler
69
// we're checking the code generation, not applicability of the code to the actual target
70
// so temporarily override the detected cpu to allow emission of all instructions
71
const ProcessorFeatures detected_features = VM_Version::features();
72
VM_Version::features(FT_ALL);
73
74
// BEGIN Generated code -- do not edit
75
// Generated by aarch32-asmtest.py
76
Label back, forth, near, near_post, near_flt, near_post_flt;
77
__ bind(back);
78
79
// ThreeRegSft
80
__ add(r8, r2, r11, ::lsr(10)); // add r8, r2, r11, lsr #10
81
__ adds(r1, r3, r7, ::asr(1), Assembler::EQ); // addEQs r1, r3, r7, asr #1
82
__ eor(r0, r9, r4, ::lsl(5)); // eor r0, r9, r4, lsl #5
83
__ eors(r9, r2, r6, ::rrx(), Assembler::GT); // eorGTs r9, r2, r6, rrx
84
__ sub(r0, r12, lr, ::lsr(0), Assembler::GT); // subGT r0, r12, lr, lsr #0
85
__ subs(r8, r2, r4, ::ror(6), Assembler::EQ); // subEQs r8, r2, r4, ror #6
86
__ rsb(r8, r9, sp, ::lsl(3)); // rsb r8, r9, sp, lsl #3
87
__ rsbs(r8, r0, r4, ::ror(16), Assembler::VS); // rsbVSs r8, r0, r4, ror #16
88
__ add(r9, r5, r1, ::lsr(15), Assembler::LE); // addLE r9, r5, r1, lsr #15
89
__ adds(r1, sp, r6, ::asr(5)); // adds r1, sp, r6, asr #5
90
__ adc(r11, sp, r7, ::asr(1), Assembler::GT); // adcGT r11, sp, r7, asr #1
91
__ adcs(r0, r8, r9, ::lsr(6)); // adcs r0, r8, r9, lsr #6
92
__ sbc(r9, r3, r6, ::ror(5)); // sbc r9, r3, r6, ror #5
93
__ sbcs(r1, sp, r5, ::asr(16), Assembler::HI); // sbcHIs r1, sp, r5, asr #16
94
__ rsc(r8, r2, r6, ::lsl(9), Assembler::CC); // rscCC r8, r2, r6, lsl #9
95
__ rscs(r10, r4, sp, ::ror(14)); // rscs r10, r4, sp, ror #14
96
__ orr(r11, sp, r5, ::lsl(15), Assembler::NE); // orrNE r11, sp, r5, lsl #15
97
__ orrs(r9, r10, r4, ::ror(14)); // orrs r9, r10, r4, ror #14
98
__ bic(r9, sp, r5, ::ror(1)); // bic r9, sp, r5, ror #1
99
__ bics(r0, r2, r7, ::asr(10)); // bics r0, r2, r7, asr #10
100
101
// ThreeRegRSR
102
__ add(sp, r6, r7, ::ror(r7)); // add sp, r6, r7, ror r7
103
__ adds(r4, r12, r6, ::ror(r7), Assembler::HI); // addHIs r4, r12, r6, ror r7
104
__ eor(r5, r6, r7, ::asr(r12), Assembler::LS); // eorLS r5, r6, r7, asr r12
105
__ eors(r8, r5, sp, ::lsl(r4), Assembler::AL); // eorALs r8, r5, sp, lsl r4
106
__ sub(r2, r12, r5, ::asr(r0)); // sub r2, r12, r5, asr r0
107
__ subs(r9, r3, r7, ::lsl(r12), Assembler::HS); // subHSs r9, r3, r7, lsl r12
108
__ rsb(r9, r12, r4, ::lsl(r6), Assembler::GT); // rsbGT r9, r12, r4, lsl r6
109
__ rsbs(r8, r2, r12, ::lsl(r1)); // rsbs r8, r2, r12, lsl r1
110
__ add(r4, r12, sp, ::lsl(sp)); // add r4, r12, sp, lsl sp
111
__ adds(r8, r11, r6, ::ror(sp)); // adds r8, r11, r6, ror sp
112
__ adc(r0, r2, r5, ::lsl(r4), Assembler::NE); // adcNE r0, r2, r5, lsl r4
113
__ adcs(r11, lr, r6, ::asr(r2)); // adcs r11, lr, r6, asr r2
114
__ sbc(r8, r10, lr, ::asr(r3), Assembler::HI); // sbcHI r8, r10, lr, asr r3
115
__ sbcs(r1, r12, r5, ::lsl(r6)); // sbcs r1, r12, r5, lsl r6
116
__ rsc(r4, r5, lr, ::ror(r10), Assembler::VS); // rscVS r4, r5, lr, ror r10
117
__ rscs(r1, r12, sp, ::lsl(r8)); // rscs r1, r12, sp, lsl r8
118
__ orr(r8, r1, r6, ::ror(r0), Assembler::VS); // orrVS r8, r1, r6, ror r0
119
__ orrs(r11, sp, r7, ::ror(r5)); // orrs r11, sp, r7, ror r5
120
__ bic(r4, lr, r6, ::lsl(r2), Assembler::AL); // bicAL r4, lr, r6, lsl r2
121
__ bics(r10, r11, sp, ::lsl(r3)); // bics r10, r11, sp, lsl r3
122
123
// TwoRegImm
124
__ add(r8, sp, (unsigned)268435462U, Assembler::HI); // addHI r8, sp, #268435462
125
__ adds(sp, lr, (unsigned)162529280U); // adds sp, lr, #162529280
126
__ eor(lr, r6, (unsigned)8192000U); // eor lr, r6, #8192000
127
__ eors(r2, r3, (unsigned)292U); // eors r2, r3, #292
128
__ sub(r4, sp, (unsigned)227540992U); // sub r4, sp, #227540992
129
__ subs(r1, lr, (unsigned)33554432U, Assembler::LT); // subLTs r1, lr, #33554432
130
__ rsb(r0, r5, (unsigned)2483027968U); // rsb r0, r5, #2483027968
131
__ rsbs(r8, r4, (unsigned)3080192U, Assembler::LO); // rsbLOs r8, r4, #3080192
132
__ add(r9, r4, (unsigned)2147483648U, Assembler::LT); // addLT r9, r4, #2147483648
133
__ adds(r8, r4, (unsigned)32768U, Assembler::AL); // addALs r8, r4, #32768
134
__ adc(r10, lr, (unsigned)10752U, Assembler::CS); // adcCS r10, lr, #10752
135
__ adcs(r10, r6, (unsigned)774144U); // adcs r10, r6, #774144
136
__ sbc(r2, r12, (unsigned)637534208U); // sbc r2, r12, #637534208
137
__ sbcs(r8, r10, (unsigned)692060160U); // sbcs r8, r10, #692060160
138
__ rsc(sp, r6, (unsigned)7405568U); // rsc sp, r6, #7405568
139
__ rscs(r10, r11, (unsigned)244318208U, Assembler::NE); // rscNEs r10, r11, #244318208
140
__ orr(r3, r7, (unsigned)66846720U, Assembler::VS); // orrVS r3, r7, #66846720
141
__ orrs(r2, r5, (unsigned)1327104U, Assembler::EQ); // orrEQs r2, r5, #1327104
142
__ bic(r8, r1, (unsigned)3744U, Assembler::VS); // bicVS r8, r1, #3744
143
__ bics(r0, r2, (unsigned)2684354560U, Assembler::LO); // bicLOs r0, r2, #2684354560
144
145
// TwoRegSft
146
__ tst(r8, sp, ::lsl(5)); // tst r8, sp, lsl #5
147
__ teq(r6, r7, ::lsr(3)); // teq r6, r7, lsr #3
148
__ cmp(r12, r4, ::ror(2)); // cmp r12, r4, ror #2
149
__ cmn(r5, r7, ::lsl(16), Assembler::LT); // cmnLT r5, r7, lsl #16
150
151
// TwoRegRSR
152
__ tst(r2, lr, ::lsr(r7)); // tst r2, lr, lsr r7
153
__ teq(r0, r2, ::ror(r5), Assembler::CC); // teqCC r0, r2, ror r5
154
__ cmp(lr, r7, ::lsr(r11), Assembler::LS); // cmpLS lr, r7, lsr r11
155
__ cmn(r10, r7, ::lsl(r11), Assembler::VS); // cmnVS r10, r7, lsl r11
156
157
// OneRegImm
158
__ tst(r2, (unsigned)557842432U); // tst r2, #557842432
159
__ teq(lr, (unsigned)7077888U, Assembler::MI); // teqMI lr, #7077888
160
__ cmp(r5, (unsigned)939524096U); // cmp r5, #939524096
161
__ cmn(r7, (unsigned)2147483650U, Assembler::LO); // cmnLO r7, #2147483650
162
163
// Shift op
164
__ lsl(r0, r4, (unsigned)23U); // lsl r0, r4, #23
165
__ lsls(r1, r4, (unsigned)9U); // lsls r1, r4, #9
166
__ lsr(r0, r10, (unsigned)3U); // lsr r0, r10, #3
167
__ lsrs(r0, r10, (unsigned)20U); // lsrs r0, r10, #20
168
__ asr(r1, r9, (unsigned)11U); // asr r1, r9, #11
169
__ asrs(r2, r11, (unsigned)10U, Assembler::VS); // asrVSs r2, r11, #10
170
171
// shift op
172
__ ror(r8, r2, (unsigned)31U, Assembler::CC); // rorCC r8, r2, #31
173
__ rors(r9, r12, (unsigned)8U); // rors r9, r12, #8
174
175
// ThreeRegNon
176
__ ror(r8, lr, r7); // ror r8, lr, r7
177
__ rors(r12, r3, r4); // rors r12, r3, r4
178
__ lsl(r12, sp, lr, Assembler::GT); // lslGT r12, sp, lr
179
__ lsls(r12, sp, r6, Assembler::AL); // lslALs r12, sp, r6
180
__ lsr(r0, r1, r9, Assembler::GT); // lsrGT r0, r1, r9
181
__ lsrs(r11, r3, r12, Assembler::GT); // lsrGTs r11, r3, r12
182
__ asr(r2, r12, r6, Assembler::LE); // asrLE r2, r12, r6
183
__ asrs(r1, r10, r6, Assembler::LT); // asrLTs r1, r10, r6
184
185
// TwoRegNon
186
__ mov(r10, r3); // mov r10, r3
187
__ movs(r0, r9); // movs r0, r9
188
189
// OneRegImm
190
__ mov_i(r3, (unsigned)656U, Assembler::VC); // movVC r3, #656
191
__ movs_i(r4, (unsigned)2064384U); // movs r4, #2064384
192
193
// TwoRegSft
194
__ mov(r12, r6, ::lsr(3)); // mov r12, r6, lsr #3
195
__ movs(r5, sp, ::asr(10), Assembler::VC); // movVCs r5, sp, asr #10
196
197
// TwoRegRSR
198
__ mov(r1, lr, ::ror(r3)); // mov r1, lr, ror r3
199
__ movs(r8, r12, ::ror(r9), Assembler::EQ); // movEQs r8, r12, ror r9
200
201
// OneRegImm16
202
__ movw_i(r11, (unsigned)53041U, Assembler::LO); // movwLO r11, #53041
203
__ movt_i(r9, (unsigned)11255U, Assembler::LO); // movtLO r9, #11255
204
205
// ThreeRegNon
206
__ mul(r1, sp, r5, Assembler::LE); // mulLE r1, sp, r5
207
__ muls(r0, r10, r11); // muls r0, r10, r11
208
209
// FourRegNon
210
__ mla(r0, r3, r12, r7); // mla r0, r3, r12, r7
211
__ mlas(r8, r11, r3, r6, Assembler::EQ); // mlaEQs r8, r11, r3, r6
212
__ umull(lr, r4, r5, r6); // umull lr, r4, r5, r6
213
__ umulls(r0, r4, r6, r7); // umulls r0, r4, r6, r7
214
__ umlal(r8, r0, r11, lr); // umlal r8, r0, r11, lr
215
__ umlals(r11, r4, lr, r7); // umlals r11, r4, lr, r7
216
__ smull(r1, r5, r6, r7, Assembler::HS); // smullHS r1, r5, r6, r7
217
__ smulls(r0, r11, r12, r5, Assembler::MI); // smullMIs r0, r11, r12, r5
218
219
// FourRegNon
220
__ umaal(r8, r9, r2, r5); // umaal r8, r9, r2, r5
221
__ mls(r0, r4, sp, lr, Assembler::EQ); // mlsEQ r0, r4, sp, lr
222
223
// ThreeRegNon
224
__ qadd(r9, r4, sp, Assembler::PL); // qaddPL r9, r4, sp
225
__ qsub(r0, r12, r5, Assembler::MI); // qsubMI r0, r12, r5
226
__ qdadd(r3, r5, r7); // qdadd r3, r5, r7
227
__ qdsub(r9, r2, r4); // qdsub r9, r2, r4
228
229
// FourRegNon
230
__ smlabb(r1, r12, r5, r6); // smlabb r1, r12, r5, r6
231
__ smlabt(r0, r10, r12, r6); // smlabt r0, r10, r12, r6
232
__ smlatb(r8, r1, r3, lr); // smlatb r8, r1, r3, lr
233
__ smlatt(r1, sp, r6, r7); // smlatt r1, sp, r6, r7
234
__ smlawb(r0, r3, r4, r6); // smlawb r0, r3, r4, r6
235
__ smlawt(r11, r4, lr, r7); // smlawt r11, r4, lr, r7
236
__ smlalbb(r0, r10, r6, r7); // smlalbb r0, r10, r6, r7
237
__ smlalbt(r3, r11, r4, lr, Assembler::LS); // smlalbtLS r3, r11, r4, lr
238
__ smlaltb(r8, r11, r3, r12); // smlaltb r8, r11, r3, r12
239
__ smlaltt(r8, r1, r3, r5); // smlaltt r8, r1, r3, r5
240
241
// ThreeRegNon
242
__ smulwb(r2, r12, sp, Assembler::HS); // smulwbHS r2, r12, sp
243
__ smulwt(r8, r12, r6); // smulwt r8, r12, r6
244
__ smulbb(r2, r6, lr, Assembler::GE); // smulbbGE r2, r6, lr
245
__ smulbt(r8, r12, r7); // smulbt r8, r12, r7
246
__ smultb(r10, r3, lr, Assembler::EQ); // smultbEQ r10, r3, lr
247
__ smultt(r0, r3, sp); // smultt r0, r3, sp
248
249
// MemoryOp
250
__ ldr(r10, Address(r7, r9, lsl(), Address::ADD, Address::post)); // ldr r10, [r7], r9
251
__ ldrb(r0, Address(r9, 196)); // ldrb r0, [r9, #196]
252
__ ldrh(lr, Address(r4, r6, lsl(), Address::ADD, Address::pre)); // ldrh lr, [r4, r6]!
253
__ ldrsb(r6, Address(__ pre(r9, 232))); // ldrsb r6, [r9, #232]!
254
__ ldrsh(r2, Address(r1, r1, lsl(), Address::ADD, Address::post)); // ldrsh r2, [r1], r1
255
__ str(r0, Address(r9, r4, lsl(), Address::ADD, Address::post)); // str r0, [r9], r4
256
__ strb(r3, Address(__ pre(r5, 92))); // strb r3, [r5, #92]!
257
__ strh(r2, Address(r8, 160)); // strh r2, [r8, #160]
258
259
// MemoryOp
260
__ ldr(r8, Address(r12, r8, lsl(), Address::ADD, Address::off)); // ldr r8, [r12, r8]
261
__ ldrb(r11, Address(__ post(r10, 16))); // ldrb r11, [r10], #16
262
__ ldrh(r11, Address(r10, r6, lsl(), Address::ADD, Address::off)); // ldrh r11, [r10, r6]
263
__ ldrsb(r5, Address(r11, r10, lsl(), Address::ADD, Address::pre)); // ldrsb r5, [r11, r10]!
264
__ ldrsh(r6, Address(r3, r7, lsl(), Address::ADD, Address::off)); // ldrsh r6, [r3, r7]
265
__ str(r7, Address(sp, r5, lsl(), Address::ADD, Address::pre)); // str r7, [sp, r5]!
266
__ strb(r2, Address(r10)); // strb r2, [r10]
267
__ strh(r6, Address(r4, r3, lsl(), Address::ADD, Address::post)); // strh r6, [r4], r3
268
269
// MemoryOp
270
__ ldr(r10, Address(r12)); // ldr r10, [r12]
271
__ ldrb(r4, Address(__ post(r11, 132))); // ldrb r4, [r11], #132
272
__ ldrh(r9, Address(r9, r12, lsl(), Address::ADD, Address::post)); // ldrh r9, [r9], r12
273
__ ldrsb(r9, Address(__ post(r3, 148))); // ldrsb r9, [r3], #148
274
__ ldrsh(r11, Address(__ pre(r2, 148))); // ldrsh r11, [r2, #148]!
275
__ str(r11, Address(sp, r11, lsl(), Address::ADD, Address::off)); // str r11, [sp, r11]
276
__ strb(r1, Address(sp, r10, lsl(), Address::ADD, Address::off)); // strb r1, [sp, r10]
277
__ strh(r10, Address(lr, r9, lsl(), Address::ADD, Address::post)); // strh r10, [lr], r9
278
279
// MemoryOp
280
__ ldr(r6, Address(r3, r4, lsl(), Address::ADD, Address::pre)); // ldr r6, [r3, r4]!
281
__ ldrb(r4, Address(r6, sp, lsl(), Address::ADD, Address::pre)); // ldrb r4, [r6, sp]!
282
__ ldrh(r6, Address(r7, r10, lsl(), Address::ADD, Address::post)); // ldrh r6, [r7], r10
283
__ ldrsb(r0, Address(r6, r11, lsl(), Address::ADD, Address::pre)); // ldrsb r0, [r6, r11]!
284
__ ldrsh(r10, Address(r6, sp, lsl(), Address::ADD, Address::post)); // ldrsh r10, [r6], sp
285
__ str(r7, Address(r3, r12, lsl(), Address::ADD, Address::off)); // str r7, [r3, r12]
286
__ strb(r3, Address(r8, r1, lsl(), Address::ADD, Address::pre)); // strb r3, [r8, r1]!
287
__ strh(r4, Address(r12, 64)); // strh r4, [r12, #64]
288
289
__ bind(near);
290
291
// LitMemoryOp
292
__ ldr(r1, near); // ldr r1, near
293
__ ldrb(r7, __ pc()); // ldrb r7, .
294
__ ldrh(r2, near); // ldrh r2, near
295
__ ldrsb(r10, __ pc()); // ldrsb r10, .
296
__ ldrsh(lr, near_post); // ldrsh lr, near_post
297
298
// LitMemoryOp
299
__ ldr(r2, __ pc()); // ldr r2, .
300
__ ldrb(r3, __ pc()); // ldrb r3, .
301
__ ldrh(r7, near_post); // ldrh r7, near_post
302
__ ldrsb(sp, __ pc()); // ldrsb sp, .
303
__ ldrsh(r10, near); // ldrsh r10, near
304
305
// LitMemoryOp
306
__ ldr(r5, __ pc()); // ldr r5, .
307
__ ldrb(lr, near_post); // ldrb lr, near_post
308
__ ldrh(r5, near_post); // ldrh r5, near_post
309
__ ldrsb(r6, near); // ldrsb r6, near
310
__ ldrsh(r11, near); // ldrsh r11, near
311
312
// LitMemoryOp
313
__ ldr(r7, near_post); // ldr r7, near_post
314
__ ldrb(r5, near_post); // ldrb r5, near_post
315
__ ldrh(r10, near); // ldrh r10, near
316
__ ldrsb(r6, near_post); // ldrsb r6, near_post
317
__ ldrsh(r9, __ pc()); // ldrsh r9, .
318
319
__ bind(near_post);
320
321
// MemoryRegRegSftOp
322
__ ldr(r0, Address(r0, r10, ::ror(6), Address::ADD, Address::post)); // ldr r0, [r0], r10, ror #6
323
__ ldrb(r3, Address(r8, lr, ::lsl(9), Address::ADD, Address::off)); // ldrb r3, [r8, lr, lsl #9]
324
__ str(r5, Address(sp, r3, ::lsl(15), Address::ADD, Address::off)); // str r5, [sp, r3, lsl #15]
325
__ strb(r9, Address(r9, r5, ::asr(2), Address::ADD, Address::post)); // strb r9, [r9], r5, asr #2
326
327
// MemoryRegRegSftOp
328
__ ldr(r5, Address(r4, r0, ::ror(6), Address::ADD, Address::off)); // ldr r5, [r4, r0, ror #6]
329
__ ldrb(lr, Address(r0, r4, ::lsr(9), Address::ADD, Address::off)); // ldrb lr, [r0, r4, lsr #9]
330
__ str(r5, Address(r12, r12, ::asr(5), Address::ADD, Address::post)); // str r5, [r12], r12, asr #5
331
__ strb(r3, Address(r1, r7, ::ror(12), Address::ADD, Address::pre)); // strb r3, [r1, r7, ror #12]!
332
333
// MemoryRegRegSftOp
334
__ ldr(r6, Address(r2, r3, ::rrx(), Address::ADD, Address::pre)); // ldr r6, [r2, r3, rrx]!
335
__ ldrb(r8, Address(lr, r2, ::asr(16), Address::ADD, Address::pre)); // ldrb r8, [lr, r2, asr #16]!
336
__ str(r6, Address(r3, r6, ::ror(7), Address::ADD, Address::pre)); // str r6, [r3, r6, ror #7]!
337
__ strb(r3, Address(r8, r2, ::lsl(10), Address::ADD, Address::off)); // strb r3, [r8, r2, lsl #10]
338
339
// MemoryRegRegSftOp
340
__ ldr(r11, Address(sp, lr, ::lsl(8), Address::ADD, Address::off)); // ldr r11, [sp, lr, lsl #8]
341
__ ldrb(r10, Address(sp, r12, ::lsl(4), Address::ADD, Address::pre)); // ldrb r10, [sp, r12, lsl #4]!
342
__ str(sp, Address(r9, r2, ::asr(2), Address::ADD, Address::off)); // str sp, [r9, r2, asr #2]
343
__ strb(r7, Address(r11, lr, ::asr(14), Address::ADD, Address::pre)); // strb r7, [r11, lr, asr #14]!
344
345
// LdStOne
346
__ ldrex(r12, r11); // ldrex r12, [r11]
347
__ ldrexb(r4, r12); // ldrexb r4, [r12]
348
__ ldrexh(r11, r11); // ldrexh r11, [r11]
349
350
// LdStTwo
351
__ strex(r1, r7, lr); // strex r1, r7, [lr]
352
__ strexb(r12, r6, r4); // strexb r12, r6, [r4]
353
__ strexh(r4, r6, r7, Assembler::HS); // strexhHS r4, r6, [r7]
354
355
// ThreeRegNon
356
__ sadd16(r3, r4, r7); // sadd16 r3, r4, r7
357
__ sasx(r9, r10, r3, Assembler::AL); // sasxAL r9, r10, r3
358
__ ssax(r12, r5, r6); // ssax r12, r5, r6
359
__ ssub16(r12, r5, lr); // ssub16 r12, r5, lr
360
__ sadd8(r0, r10, r7); // sadd8 r0, r10, r7
361
__ ssub8(r0, r8, r2, Assembler::VS); // ssub8VS r0, r8, r2
362
__ qadd16(r11, r4, r5, Assembler::PL); // qadd16PL r11, r4, r5
363
__ qasx(r11, r3, r12, Assembler::VS); // qasxVS r11, r3, r12
364
__ qsax(r0, r3, r5); // qsax r0, r3, r5
365
__ ssub16(r10, r12, r5, Assembler::AL); // ssub16AL r10, r12, r5
366
__ qadd8(r10, r6, lr, Assembler::CC); // qadd8CC r10, r6, lr
367
__ qsub8(r10, r11, r7); // qsub8 r10, r11, r7
368
__ shadd16(r9, r4, lr, Assembler::PL); // shadd16PL r9, r4, lr
369
__ shasx(r1, lr, r7); // shasx r1, lr, r7
370
__ shsax(r9, r11, r5, Assembler::LO); // shsaxLO r9, r11, r5
371
__ shsub16(r3, r1, r11, Assembler::GE); // shsub16GE r3, r1, r11
372
__ shadd8(sp, r5, r7, Assembler::GT); // shadd8GT sp, r5, r7
373
__ shsub8(r1, r5, r7); // shsub8 r1, r5, r7
374
375
// ThreeRegNon
376
__ uadd16(r10, r4, r7); // uadd16 r10, r4, r7
377
__ uasx(r1, r9, r7, Assembler::HS); // uasxHS r1, r9, r7
378
__ usax(r11, sp, r7); // usax r11, sp, r7
379
__ usub16(r11, r4, lr); // usub16 r11, r4, lr
380
__ uadd8(r2, sp, r7, Assembler::LO); // uadd8LO r2, sp, r7
381
__ usub8(r8, r10, lr, Assembler::GT); // usub8GT r8, r10, lr
382
__ uqadd16(r3, r12, sp); // uqadd16 r3, r12, sp
383
__ uqasx(r4, sp, r6); // uqasx r4, sp, r6
384
__ uqsax(r1, r10, lr); // uqsax r1, r10, lr
385
__ uqsub16(r2, sp, lr, Assembler::LE); // uqsub16LE r2, sp, lr
386
__ uqadd8(r1, r12, r5); // uqadd8 r1, r12, r5
387
__ uqsub8(r0, r4, sp, Assembler::GT); // uqsub8GT r0, r4, sp
388
__ uhadd16(r0, r10, r5, Assembler::HI); // uhadd16HI r0, r10, r5
389
__ uhasx(r11, r4, r7, Assembler::LE); // uhasxLE r11, r4, r7
390
__ uhsax(r1, lr, r9, Assembler::GE); // uhsaxGE r1, lr, r9
391
__ uhsub16(r2, r11, lr); // uhsub16 r2, r11, lr
392
__ uhadd8(r9, r4, r5, Assembler::GE); // uhadd8GE r9, r4, r5
393
__ uhsub8(r2, sp, lr, Assembler::HI); // uhsub8HI r2, sp, lr
394
395
// PKUPSATREV
396
__ sxtab16(r10, r3, r7, ::ror(16)); // sxtab16 r10, r3, r7, ROR #16
397
__ sxtab(r9, r5, r7, ::ror(24), Assembler::CS); // sxtabCS r9, r5, r7, ROR #24
398
__ sxtah(r3, r5, r7, ::ror(8)); // sxtah r3, r5, r7, ROR #8
399
__ uxtab16(r8, r4, r6, ::ror(8), Assembler::AL); // uxtab16AL r8, r4, r6, ROR #8
400
__ uxtab(r0, r11, sp, ::rrx(), Assembler::EQ); // uxtabEQ r0, r11, sp, ROR #0
401
__ uxtah(r9, r12, r5, ::rrx()); // uxtah r9, r12, r5, ROR #0
402
403
// PKUPSATREV
404
__ sxtb16(r3, r11, ::ror(16), Assembler::GE); // sxtb16GE r3, r11, ROR #16
405
__ sxtb(r2, r6, ::rrx(), Assembler::HI); // sxtbHI r2, r6, ROR #0
406
__ sxth(r3, sp, ::ror(24), Assembler::GT); // sxthGT r3, sp, ROR #24
407
__ uxtb16(r12, r5, ::ror(16)); // uxtb16 r12, r5, ROR #16
408
__ uxtb(r12, r5, ::ror(16)); // uxtb r12, r5, ROR #16
409
__ uxth(r8, r5, ::ror(16)); // uxth r8, r5, ROR #16
410
411
// TwoRegNon
412
__ rev(r10, r4, Assembler::EQ); // revEQ r10, r4
413
__ rev16(r8, r12, Assembler::GE); // rev16GE r8, r12
414
__ rbit(lr, r7); // rbit lr, r7
415
__ revsh(sp, r7, Assembler::GT); // revshGT sp, r7
416
417
// ThreeRegNon
418
__ sdiv(r9, sp, lr); // sdiv r9, sp, lr
419
__ udiv(r2, r12, r6); // udiv r2, r12, r6
420
421
// TwoRegTwoImm
422
__ sbfx(r0, r1, (unsigned)20U, (unsigned)3U, Assembler::MI); // sbfxMI r0, r1, #20, #3
423
__ ubfx(r9, r2, (unsigned)16U, (unsigned)15U); // ubfx r9, r2, #16, #15
424
__ bfi(r1, r11, (unsigned)27U, (unsigned)3U, Assembler::HI); // bfiHI r1, r11, #27, #3
425
426
// TwoRegTwoImm
427
__ bfc(r3, (unsigned)7U, (unsigned)10U); // bfc r3, #7, #10
428
429
// MultipleMemOp
430
__ stmda(r6, 3435U, false); // stmda r6, {r0, r1, r3, r5, r6, r8, r10, r11}
431
__ stmed(r4, 14559U, false); // stmed r4, {r0, r1, r2, r3, r4, r6, r7, r11, r12, sp}
432
__ ldmda(r0, 57812U, false); // ldmda r0, {r2, r4, r6, r7, r8, sp, lr, pc}
433
__ ldmfa(r12, 39027U, true); // ldmfa r12!, {r0, r1, r4, r5, r6, r11, r12, pc}
434
__ stmia(r9, 12733U, true); // stmia r9!, {r0, r2, r3, r4, r5, r7, r8, r12, sp}
435
__ stmea(r11, 21955U, false); // stmea r11, {r0, r1, r6, r7, r8, r10, r12, lr}
436
__ ldmia(r12, 48418U, true); // ldmia r12!, {r1, r5, r8, r10, r11, r12, sp, pc}
437
__ ldmfd(sp, 41226U, true); // ldmfd sp!, {r1, r3, r8, sp, pc}
438
__ stmdb(r11, 8729U, true); // stmdb r11!, {r0, r3, r4, r9, sp}
439
__ stmfd(r9, 36309U, true); // stmfd r9!, {r0, r2, r4, r6, r7, r8, r10, r11, pc}
440
__ ldmdb(r5, 24667U, true); // ldmdb r5!, {r0, r1, r3, r4, r6, sp, lr}
441
__ ldmea(r1, 37287U, false); // ldmea r1, {r0, r1, r2, r5, r7, r8, r12, pc}
442
__ stmib(r11, 28266U, true); // stmib r11!, {r1, r3, r5, r6, r9, r10, r11, sp, lr}
443
__ stmfa(r11, 17671U, false); // stmfa r11, {r0, r1, r2, r8, r10, lr}
444
__ ldmib(r0, 21452U, true); // ldmib r0!, {r2, r3, r6, r7, r8, r9, r12, lr}
445
__ ldmed(r1, 11751U, false); // ldmed r1, {r0, r1, r2, r5, r6, r7, r8, r10, r11, sp}
446
447
// BranchLabel
448
__ b(forth, Assembler::CS); // bCS forth
449
__ bl(__ pc(), Assembler::MI); // blMI .
450
451
// OneRegNon
452
__ b(r0, Assembler::VS); // bxVS r0
453
__ bl(r3); // blx r3
454
455
// BranchLabel
456
__ b(__ pc(), Assembler::AL); // bAL .
457
__ bl(__ pc()); // bl .
458
459
// OneRegNon
460
__ b(r0, Assembler::VS); // bxVS r0
461
__ bl(r5); // blx r5
462
463
// BranchLabel
464
__ b(forth, Assembler::LE); // bLE forth
465
__ bl(__ pc(), Assembler::MI); // blMI .
466
467
// OneRegNon
468
__ b(r9, Assembler::NE); // bxNE r9
469
__ bl(r12); // blx r12
470
471
// BranchLabel
472
__ b(back); // b back
473
__ bl(__ pc(), Assembler::HI); // blHI .
474
475
// OneRegNon
476
__ b(r1, Assembler::VC); // bxVC r1
477
__ bl(r7, Assembler::GT); // blxGT r7
478
479
// BranchLabel
480
__ b(back, Assembler::GE); // bGE back
481
__ bl(__ pc(), Assembler::HI); // blHI .
482
483
// OneRegNon
484
__ b(r12); // bx r12
485
__ bl(r7, Assembler::CC); // blxCC r7
486
487
// BranchLabel
488
__ b(__ pc()); // b .
489
__ bl(back, Assembler::GT); // blGT back
490
491
// OneRegNon
492
__ b(r1, Assembler::GE); // bxGE r1
493
__ bl(r0); // blx r0
494
495
// BranchLabel
496
__ b(__ pc()); // b .
497
__ bl(forth); // bl forth
498
499
// OneRegNon
500
__ b(lr, Assembler::GT); // bxGT lr
501
__ bl(r11, Assembler::NE); // blxNE r11
502
503
// BranchLabel
504
__ b(__ pc(), Assembler::CS); // bCS .
505
__ bl(__ pc()); // bl .
506
507
// OneRegNon
508
__ b(r10, Assembler::HS); // bxHS r10
509
__ bl(r4); // blx r4
510
511
// BranchLabel
512
__ b(back, Assembler::AL); // bAL back
513
__ bl(__ pc()); // bl .
514
515
// OneRegNon
516
__ b(r12, Assembler::LO); // bxLO r12
517
__ bl(r8); // blx r8
518
519
// BranchLabel
520
__ b(forth); // b forth
521
__ bl(__ pc()); // bl .
522
523
// OneRegNon
524
__ b(r10); // bx r10
525
__ bl(r1); // blx r1
526
527
// ThreeFltNon
528
__ vmla_f32(f4, f8, f12, Assembler::MI); // vmlaMI.f32 s4, s8, s12
529
__ vmls_f32(f4, f10, f10); // vmls.f32 s4, s10, s10
530
__ vnmla_f32(f2, f10, f12); // vnmla.f32 s2, s10, s12
531
__ vnmls_f32(f8, f6, f8, Assembler::LT); // vnmlsLT.f32 s8, s6, s8
532
__ vnmul_f32(f6, f12, f14, Assembler::MI); // vnmulMI.f32 s6, s12, s14
533
__ vadd_f32(f0, f2, f0); // vadd.f32 s0, s2, s0
534
__ vsub_f32(f2, f4, f10, Assembler::AL); // vsubAL.f32 s2, s4, s10
535
__ vdiv_f32(f0, f2, f12, Assembler::CS); // vdivCS.f32 s0, s2, s12
536
537
// ThreeFltNon
538
__ vmla_f64(d0, d3, d6); // vmla.f64 d0, d3, d6
539
__ vmls_f64(d0, d1, d5); // vmls.f64 d0, d1, d5
540
__ vnmla_f64(d1, d4, d6); // vnmla.f64 d1, d4, d6
541
__ vnmls_f64(d0, d1, d1, Assembler::NE); // vnmlsNE.f64 d0, d1, d1
542
__ vnmul_f64(d3, d5, d5, Assembler::NE); // vnmulNE.f64 d3, d5, d5
543
__ vadd_f64(d0, d2, d4, Assembler::LO); // vaddLO.f64 d0, d2, d4
544
__ vsub_f64(d1, d2, d4); // vsub.f64 d1, d2, d4
545
__ vdiv_f64(d0, d1, d5, Assembler::MI); // vdivMI.f64 d0, d1, d5
546
547
// TwoFltNon
548
__ vabs_f32(f6, f6); // vabs.f32 s6, s6
549
__ vneg_f32(f6, f8, Assembler::PL); // vnegPL.f32 s6, s8
550
__ vsqrt_f32(f0, f8); // vsqrt.f32 s0, s8
551
552
// TwoFltNon
553
__ vabs_f64(d0, d4); // vabs.f64 d0, d4
554
__ vneg_f64(d1, d4); // vneg.f64 d1, d4
555
__ vsqrt_f64(d0, d1); // vsqrt.f64 d0, d1
556
557
// vmov_f32
558
__ vmov_f32(f0, lr, Assembler::PL); // vmovPL.f32 s0, lr
559
560
// vmov_f32
561
__ vmov_f32(r11, f8); // vmov.f32 r11, s8
562
563
// vmov_f64
564
__ vmov_f64(d1, r11, lr, Assembler::LT); // vmovLT.f64 d1, r11, lr
565
566
// vmov_f64
567
__ vmov_f64(r7, r5, d5); // vmov.f64 r7, r5, d5
568
569
// vmov_f32
570
__ vmov_f32(f8, f12); // vmov.f32 s8, s12
571
572
// vmov_f64
573
__ vmov_f64(d1, d2, Assembler::HI); // vmovHI.f64 d1, d2
574
575
// vmov_f32
576
__ vmov_f32(f4, 1.0f, Assembler::VS); // vmovVS.f32 s4, #1.0
577
578
// vmov_f64
579
__ vmov_f64(d2, 1.0); // vmov.f64 d2, #1.0
580
581
// vmov_f32
582
__ vmov_f32(f6, 2.0f); // vmov.f32 s6, #2.0
583
584
// vmov_f64
585
__ vmov_f64(d1, 2.0); // vmov.f64 d1, #2.0
586
587
// vector memory
588
__ vldr_f32(f4, Address(r5, 116)); // vldr.f32 s4, [r5, #116]
589
__ vstr_f32(f2, Address(r1, 56), Assembler::CC); // vstrCC.f32 s2, [r1, #56]
590
591
// vector memory
592
__ vldr_f64(d7, Address(r5, 16), Assembler::NE); // vldrNE.f64 d7, [r5, #16]
593
__ vstr_f64(d6, Address(r1, 228)); // vstr.f64 d6, [r1, #228]
594
595
__ bind(near_flt);
596
597
// vector memory
598
__ vldr_f32(f2, near_post_flt); // vldr.f32 s2, near_post_flt
599
__ vstr_f32(f6, near_post_flt); // vstr.f32 s6, near_post_flt
600
601
// vector memory
602
__ vldr_f64(d2, near_flt, Assembler::LT); // vldrLT.f64 d2, near_flt
603
__ vstr_f64(d3, __ pc(), Assembler::GT); // vstrGT.f64 d3, .
604
605
// vector memory
606
__ vldr_f32(f4, near_post_flt, Assembler::CC); // vldrCC.f32 s4, near_post_flt
607
__ vstr_f32(f0, near_post_flt); // vstr.f32 s0, near_post_flt
608
609
// vector memory
610
__ vldr_f64(d4, near_post_flt, Assembler::GT); // vldrGT.f64 d4, near_post_flt
611
__ vstr_f64(d0, near_flt); // vstr.f64 d0, near_flt
612
613
// vector memory
614
__ vldr_f32(f8, near_post_flt); // vldr.f32 s8, near_post_flt
615
__ vstr_f32(f6, near_post_flt); // vstr.f32 s6, near_post_flt
616
617
// vector memory
618
__ vldr_f64(d4, near_flt, Assembler::PL); // vldrPL.f64 d4, near_flt
619
__ vstr_f64(d5, near_flt); // vstr.f64 d5, near_flt
620
621
// vector memory
622
__ vldr_f32(f8, near_post_flt, Assembler::LS); // vldrLS.f32 s8, near_post_flt
623
__ vstr_f32(f12, __ pc(), Assembler::CC); // vstrCC.f32 s12, .
624
625
// vector memory
626
__ vldr_f64(d6, near_post_flt, Assembler::AL); // vldrAL.f64 d6, near_post_flt
627
__ vstr_f64(d1, near_post_flt, Assembler::LT); // vstrLT.f64 d1, near_post_flt
628
629
__ bind(near_post_flt);
630
631
// FltMultMemOp
632
__ vldmia_f32(r1, FloatRegSet::of(f4).bits(), false); // vldmia.f32 r1, {s4}
633
__ vstmia_f32(r6, FloatRegSet::of(f4).bits(), true, Assembler::CS); // vstmiaCS.f32 r6!, {s4}
634
635
// DblMultMemOp
636
__ vldmia_f64(r9, DoubleFloatRegSet::of(d1, d2, d3, d4).bits(), true); // vldmia.f64 r9!, {d1, d2, d3, d4}
637
__ vstmia_f64(r3, DoubleFloatRegSet::of(d6, d7).bits(), true); // vstmia.f64 r3!, {d6, d7}
638
639
// FltMultMemOp
640
__ vldmdb_f32(r2, FloatRegSet::of(f6).bits(), Assembler::VS); // vldmdbVS.f32 r2!, {s6}
641
__ vstmdb_f32(r6, FloatRegSet::of(f14).bits()); // vstmdb.f32 r6!, {s14}
642
643
// DblMultMemOp
644
__ vldmdb_f64(sp, DoubleFloatRegSet::of(d4, d5, d6, d7).bits()); // vldmdb.f64 sp!, {d4, d5, d6, d7}
645
__ vstmdb_f64(r0, DoubleFloatRegSet::of(d5, d6, d7).bits()); // vstmdb.f64 r0!, {d5, d6, d7}
646
647
// vcmp_f32
648
__ vcmp_f32(f2, f2); // vcmp.f32 s2, s2
649
650
// vcmpe_f32
651
__ vcmpe_f32(f8, f8, Assembler::VC); // vcmpeVC.f32 s8, s8
652
653
// vcmp_f64
654
__ vcmp_f64(d0, d6); // vcmp.f64 d0, d6
655
656
// vcmpe_f64
657
__ vcmpe_f64(d3, d7, Assembler::GE); // vcmpeGE.f64 d3, d7
658
659
// vcmp_f32
660
__ vcmp_f32(f2, 0.0f, Assembler::LT); // vcmpLT.f32 s2, #0.0
661
662
// vcmpe_f32
663
__ vcmpe_f32(f14, 0.0f, Assembler::GT); // vcmpeGT.f32 s14, #0.0
664
665
// vcmp_f64
666
__ vcmp_f64(d4, 0.0); // vcmp.f64 d4, #0.0
667
668
// vcmpe_f64
669
__ vcmpe_f64(d1, 0.0); // vcmpe.f64 d1, #0.0
670
671
// vcvt
672
__ vcvt_s32_f32(f2, f6, Assembler::VS); // vcvtVS.s32.f32 s2, s6
673
__ vcvt_u32_f32(f6, f14, Assembler::GT); // vcvtGT.u32.f32 s6, s14
674
__ vcvt_f32_s32(f0, f2, Assembler::CC); // vcvtCC.f32.s32 s0, s2
675
__ vcvt_f32_u32(f2, f4, Assembler::CC); // vcvtCC.f32.u32 s2, s4
676
677
// vcvt
678
__ vcvt_s32_f64(f4, d4, Assembler::HI); // vcvtHI.s32.f64 s4, d4
679
__ vcvt_u32_f64(f6, d6, Assembler::HI); // vcvtHI.u32.f64 s6, d6
680
__ vcvt_f32_f64(f6, d7, Assembler::LS); // vcvtLS.f32.f64 s6, d7
681
682
// vcvt
683
__ vcvt_f64_s32(d3, f8); // vcvt.f64.s32 d3, s8
684
__ vcvt_f64_u32(d5, f14, Assembler::EQ); // vcvtEQ.f64.u32 d5, s14
685
__ vcvt_f64_f32(d4, f10, Assembler::AL); // vcvtAL.f64.f32 d4, s10
686
687
// BKPT
688
__ bkpt((unsigned)26U); // bkpt #26
689
690
__ bind(forth);
691
692
/*
693
aarch32ops.o: file format elf32-littlearm
694
695
696
Disassembly of section .text:
697
698
00000000 <back>:
699
0: e082852b add r8, r2, fp, lsr #10
700
4: 009310c7 addseq r1, r3, r7, asr #1
701
8: e0290284 eor r0, r9, r4, lsl #5
702
c: c0329066 eorsgt r9, r2, r6, rrx
703
10: c04c000e subgt r0, ip, lr
704
14: 00528364 subseq r8, r2, r4, ror #6
705
18: e069818d rsb r8, r9, sp, lsl #3
706
1c: 60708864 rsbsvs r8, r0, r4, ror #16
707
20: d08597a1 addle r9, r5, r1, lsr #15
708
24: e09d12c6 adds r1, sp, r6, asr #5
709
28: c0adb0c7 adcgt fp, sp, r7, asr #1
710
2c: e0b80329 adcs r0, r8, r9, lsr #6
711
30: e0c392e6 sbc r9, r3, r6, ror #5
712
34: 80dd1845 sbcshi r1, sp, r5, asr #16
713
38: 30e28486 rsccc r8, r2, r6, lsl #9
714
3c: e0f4a76d rscs sl, r4, sp, ror #14
715
40: 118db785 orrne fp, sp, r5, lsl #15
716
44: e19a9764 orrs r9, sl, r4, ror #14
717
48: e1cd90e5 bic r9, sp, r5, ror #1
718
4c: e1d20547 bics r0, r2, r7, asr #10
719
50: e086d777 add sp, r6, r7, ror r7
720
54: 809c4776 addshi r4, ip, r6, ror r7
721
58: 90265c57 eorls r5, r6, r7, asr ip
722
5c: e035841d eors r8, r5, sp, lsl r4
723
60: e04c2055 sub r2, ip, r5, asr r0
724
64: 20539c17 subscs r9, r3, r7, lsl ip
725
68: c06c9614 rsbgt r9, ip, r4, lsl r6
726
6c: e072811c rsbs r8, r2, ip, lsl r1
727
70: e08c4d1d add r4, ip, sp, lsl sp
728
74: e09b8d76 adds r8, fp, r6, ror sp
729
78: 10a20415 adcne r0, r2, r5, lsl r4
730
7c: e0beb256 adcs fp, lr, r6, asr r2
731
80: 80ca835e sbchi r8, sl, lr, asr r3
732
84: e0dc1615 sbcs r1, ip, r5, lsl r6
733
88: 60e54a7e rscvs r4, r5, lr, ror sl
734
8c: e0fc181d rscs r1, ip, sp, lsl r8
735
90: 61818076 orrvs r8, r1, r6, ror r0
736
94: e19db577 orrs fp, sp, r7, ror r5
737
98: e1ce4216 bic r4, lr, r6, lsl r2
738
9c: e1dba31d bics sl, fp, sp, lsl r3
739
a0: 828d8261 addhi r8, sp, #268435462 ; 0x10000006
740
a4: e29ed69b adds sp, lr, #162529280 ; 0x9b00000
741
a8: e226e87d eor lr, r6, #8192000 ; 0x7d0000
742
ac: e2332f49 eors r2, r3, #292 ; 0x124
743
b0: e24d46d9 sub r4, sp, #227540992 ; 0xd900000
744
b4: b25e1402 subslt r1, lr, #33554432 ; 0x2000000
745
b8: e2650325 rsb r0, r5, #-1811939328 ; 0x94000000
746
bc: 3274882f rsbscc r8, r4, #3080192 ; 0x2f0000
747
c0: b2849102 addlt r9, r4, #-2147483648 ; 0x80000000
748
c4: e2948902 adds r8, r4, #32768 ; 0x8000
749
c8: 22aeac2a adccs sl, lr, #10752 ; 0x2a00
750
cc: e2b6aabd adcs sl, r6, #774144 ; 0xbd000
751
d0: e2cc2426 sbc r2, ip, #637534208 ; 0x26000000
752
d4: e2da85a5 sbcs r8, sl, #692060160 ; 0x29400000
753
d8: e2e6d871 rsc sp, r6, #7405568 ; 0x710000
754
dc: 12fba6e9 rscsne sl, fp, #244318208 ; 0xe900000
755
e0: 638737ff orrvs r3, r7, #66846720 ; 0x3fc0000
756
e4: 03952951 orrseq r2, r5, #1327104 ; 0x144000
757
e8: 63c18eea bicvs r8, r1, #3744 ; 0xea0
758
ec: 33d2020a bicscc r0, r2, #-1610612736 ; 0xa0000000
759
f0: e118028d tst r8, sp, lsl #5
760
f4: e13601a7 teq r6, r7, lsr #3
761
f8: e15c0164 cmp ip, r4, ror #2
762
fc: b1750807 cmnlt r5, r7, lsl #16
763
100: e112073e tst r2, lr, lsr r7
764
104: 31300572 teqcc r0, r2, ror r5
765
108: 915e0b37 cmpls lr, r7, lsr fp
766
10c: 617a0b17 cmnvs sl, r7, lsl fp
767
110: e3120585 tst r2, #557842432 ; 0x21400000
768
114: 433e071b teqmi lr, #7077888 ; 0x6c0000
769
118: e355030e cmp r5, #939524096 ; 0x38000000
770
11c: 3377010a cmncc r7, #-2147483646 ; 0x80000002
771
120: e1a00b84 lsl r0, r4, #23
772
124: e1b01484 lsls r1, r4, #9
773
128: e1a001aa lsr r0, sl, #3
774
12c: e1b00a2a lsrs r0, sl, #20
775
130: e1a015c9 asr r1, r9, #11
776
134: 61b0254b asrsvs r2, fp, #10
777
138: 31a08fe2 rorcc r8, r2, #31
778
13c: e1b0946c rors r9, ip, #8
779
140: e1a0877e ror r8, lr, r7
780
144: e1b0c473 rors ip, r3, r4
781
148: c1a0ce1d lslgt ip, sp, lr
782
14c: e1b0c61d lsls ip, sp, r6
783
150: c1a00931 lsrgt r0, r1, r9
784
154: c1b0bc33 lsrsgt fp, r3, ip
785
158: d1a0265c asrle r2, ip, r6
786
15c: b1b0165a asrslt r1, sl, r6
787
160: e1a0a003 mov sl, r3
788
164: e1b00009 movs r0, r9
789
168: 73a03e29 movvc r3, #656 ; 0x290
790
16c: e3b0497e movs r4, #2064384 ; 0x1f8000
791
170: e1a0c1a6 lsr ip, r6, #3
792
174: 71b0554d asrsvc r5, sp, #10
793
178: e1a0137e ror r1, lr, r3
794
17c: 01b0897c rorseq r8, ip, r9
795
180: 330cbf31 movwcc fp, #53041 ; 0xcf31
796
184: 33429bf7 movtcc r9, #11255 ; 0x2bf7
797
188: d001059d mulle r1, sp, r5
798
18c: e0100b9a muls r0, sl, fp
799
190: e0207c93 mla r0, r3, ip, r7
800
194: 0038639b mlaseq r8, fp, r3, r6
801
198: e084e695 umull lr, r4, r5, r6
802
19c: e0940796 umulls r0, r4, r6, r7
803
1a0: e0a08e9b umlal r8, r0, fp, lr
804
1a4: e0b4b79e umlals fp, r4, lr, r7
805
1a8: 20c51796 smullcs r1, r5, r6, r7
806
1ac: 40db059c smullsmi r0, fp, ip, r5
807
1b0: e0498592 umaal r8, r9, r2, r5
808
1b4: 0060ed94 mlseq r0, r4, sp, lr
809
1b8: 510d9054 qaddpl r9, r4, sp
810
1bc: 4125005c qsubmi r0, ip, r5
811
1c0: e1473055 qdadd r3, r5, r7
812
1c4: e1649052 qdsub r9, r2, r4
813
1c8: e101658c smlabb r1, ip, r5, r6
814
1cc: e1006cca smlabt r0, sl, ip, r6
815
1d0: e108e3a1 smlatb r8, r1, r3, lr
816
1d4: e10176ed smlatt r1, sp, r6, r7
817
1d8: e1206483 smlawb r0, r3, r4, r6
818
1dc: e12b7ec4 smlawt fp, r4, lr, r7
819
1e0: e14a0786 smlalbb r0, sl, r6, r7
820
1e4: 914b3ec4 smlalbtls r3, fp, r4, lr
821
1e8: e14b8ca3 smlaltb r8, fp, r3, ip
822
1ec: e14185e3 smlaltt r8, r1, r3, r5
823
1f0: 21220dac smulwbcs r2, ip, sp
824
1f4: e12806ec smulwt r8, ip, r6
825
1f8: a1620e86 smulbbge r2, r6, lr
826
1fc: e16807cc smulbt r8, ip, r7
827
200: 016a0ea3 smultbeq sl, r3, lr
828
204: e1600de3 smultt r0, r3, sp
829
208: e697a009 ldr sl, [r7], r9
830
20c: e5d900c4 ldrb r0, [r9, #196] ; 0xc4
831
210: e1b4e0b6 ldrh lr, [r4, r6]!
832
214: e1f96ed8 ldrsb r6, [r9, #232]! ; 0xe8
833
218: e09120f1 ldrsh r2, [r1], r1
834
21c: e6890004 str r0, [r9], r4
835
220: e5e5305c strb r3, [r5, #92]! ; 0x5c
836
224: e1c82ab0 strh r2, [r8, #160] ; 0xa0
837
228: e79c8008 ldr r8, [ip, r8]
838
22c: e4dab010 ldrb fp, [sl], #16
839
230: e19ab0b6 ldrh fp, [sl, r6]
840
234: e1bb50da ldrsb r5, [fp, sl]!
841
238: e19360f7 ldrsh r6, [r3, r7]
842
23c: e7ad7005 str r7, [sp, r5]!
843
240: e5ca2000 strb r2, [sl]
844
244: e08460b3 strh r6, [r4], r3
845
248: e59ca000 ldr sl, [ip]
846
24c: e4db4084 ldrb r4, [fp], #132 ; 0x84
847
250: e09990bc ldrh r9, [r9], ip
848
254: e0d399d4 ldrsb r9, [r3], #148 ; 0x94
849
258: e1f2b9f4 ldrsh fp, [r2, #148]! ; 0x94
850
25c: e78db00b str fp, [sp, fp]
851
260: e7cd100a strb r1, [sp, sl]
852
264: e08ea0b9 strh sl, [lr], r9
853
268: e7b36004 ldr r6, [r3, r4]!
854
26c: e7f6400d ldrb r4, [r6, sp]!
855
270: e09760ba ldrh r6, [r7], sl
856
274: e1b600db ldrsb r0, [r6, fp]!
857
278: e096a0fd ldrsh sl, [r6], sp
858
27c: e783700c str r7, [r3, ip]
859
280: e7e83001 strb r3, [r8, r1]!
860
284: e1cc44b0 strh r4, [ip, #64] ; 0x40
861
862
00000288 <near>:
863
288: e51f1008 ldr r1, [pc, #-8] ; 288 <near>
864
28c: e55f7008 ldrb r7, [pc, #-8] ; 28c <near+0x4>
865
290: e15f21b0 ldrh r2, [pc, #-16] ; 288 <near>
866
294: e15fa0d8 ldrsb sl, [pc, #-8] ; 294 <near+0xc>
867
298: e1dfe3f8 ldrsh lr, [pc, #56] ; 2d8 <near_post>
868
29c: e51f2008 ldr r2, [pc, #-8] ; 29c <near+0x14>
869
2a0: e55f3008 ldrb r3, [pc, #-8] ; 2a0 <near+0x18>
870
2a4: e1df72bc ldrh r7, [pc, #44] ; 2d8 <near_post>
871
2a8: e15fd0d8 ldrsb sp, [pc, #-8] ; 2a8 <near+0x20>
872
2ac: e15fa2fc ldrsh sl, [pc, #-44] ; 288 <near>
873
2b0: e51f5008 ldr r5, [pc, #-8] ; 2b0 <near+0x28>
874
2b4: e5dfe01c ldrb lr, [pc, #28] ; 2d8 <near_post>
875
2b8: e1df51b8 ldrh r5, [pc, #24] ; 2d8 <near_post>
876
2bc: e15f63dc ldrsb r6, [pc, #-60] ; 288 <near>
877
2c0: e15fb4f0 ldrsh fp, [pc, #-64] ; 288 <near>
878
2c4: e59f700c ldr r7, [pc, #12] ; 2d8 <near_post>
879
2c8: e5df5008 ldrb r5, [pc, #8] ; 2d8 <near_post>
880
2cc: e15fa4bc ldrh sl, [pc, #-76] ; 288 <near>
881
2d0: e1df60d0 ldrsb r6, [pc] ; 2d8 <near_post>
882
2d4: e15f90f8 ldrsh r9, [pc, #-8] ; 2d4 <near+0x4c>
883
884
000002d8 <near_post>:
885
2d8: e690036a ldr r0, [r0], sl, ror #6
886
2dc: e7d8348e ldrb r3, [r8, lr, lsl #9]
887
2e0: e78d5783 str r5, [sp, r3, lsl #15]
888
2e4: e6c99145 strb r9, [r9], r5, asr #2
889
2e8: e7945360 ldr r5, [r4, r0, ror #6]
890
2ec: e7d0e4a4 ldrb lr, [r0, r4, lsr #9]
891
2f0: e68c52cc str r5, [ip], ip, asr #5
892
2f4: e7e13667 strb r3, [r1, r7, ror #12]!
893
2f8: e7b26063 ldr r6, [r2, r3, rrx]!
894
2fc: e7fe8842 ldrb r8, [lr, r2, asr #16]!
895
300: e7a363e6 str r6, [r3, r6, ror #7]!
896
304: e7c83502 strb r3, [r8, r2, lsl #10]
897
308: e79db40e ldr fp, [sp, lr, lsl #8]
898
30c: e7fda20c ldrb sl, [sp, ip, lsl #4]!
899
310: e789d142 str sp, [r9, r2, asr #2]
900
314: e7eb774e strb r7, [fp, lr, asr #14]!
901
318: e19bcf9f ldrex r12, [fp]
902
31c: e1dc4f9f ldrexb r4, [ip]
903
320: e1fbbf9f ldrexh fp, [fp]
904
324: e18e1f97 strex r1, r7, [lr]
905
328: e1c4cf96 strexb ip, r6, [r4]
906
32c: 21e74f96 strexhcs r4, r6, [r7]
907
330: e6143f17 sadd16 r3, r4, r7
908
334: e61a9f33 sasx r9, sl, r3
909
338: e615cf56 ssax ip, r5, r6
910
33c: e615cf7e ssub16 ip, r5, lr
911
340: e61a0f97 sadd8 r0, sl, r7
912
344: 66180ff2 ssub8vs r0, r8, r2
913
348: 5624bf15 qadd16pl fp, r4, r5
914
34c: 6623bf3c qasxvs fp, r3, ip
915
350: e6230f55 qsax r0, r3, r5
916
354: e61caf75 ssub16 sl, ip, r5
917
358: 3626af9e qadd8cc sl, r6, lr
918
35c: e62baff7 qsub8 sl, fp, r7
919
360: 56349f1e shadd16pl r9, r4, lr
920
364: e63e1f37 shasx r1, lr, r7
921
368: 363b9f55 shsaxcc r9, fp, r5
922
36c: a6313f7b shsub16ge r3, r1, fp
923
370: c635df97 shadd8gt sp, r5, r7
924
374: e6351ff7 shsub8 r1, r5, r7
925
378: e654af17 uadd16 sl, r4, r7
926
37c: 26591f37 uasxcs r1, r9, r7
927
380: e65dbf57 usax fp, sp, r7
928
384: e654bf7e usub16 fp, r4, lr
929
388: 365d2f97 uadd8cc r2, sp, r7
930
38c: c65a8ffe usub8gt r8, sl, lr
931
390: e66c3f1d uqadd16 r3, ip, sp
932
394: e66d4f36 uqasx r4, sp, r6
933
398: e66a1f5e uqsax r1, sl, lr
934
39c: d66d2f7e uqsub16le r2, sp, lr
935
3a0: e66c1f95 uqadd8 r1, ip, r5
936
3a4: c6640ffd uqsub8gt r0, r4, sp
937
3a8: 867a0f15 uhadd16hi r0, sl, r5
938
3ac: d674bf37 uhasxle fp, r4, r7
939
3b0: a67e1f59 uhsaxge r1, lr, r9
940
3b4: e67b2f7e uhsub16 r2, fp, lr
941
3b8: a6749f95 uhadd8ge r9, r4, r5
942
3bc: 867d2ffe uhsub8hi r2, sp, lr
943
3c0: e683a877 sxtab16 sl, r3, r7, ror #16
944
3c4: 26a59c77 sxtabcs r9, r5, r7, ror #24
945
3c8: e6b53477 sxtah r3, r5, r7, ror #8
946
3cc: e6c48476 uxtab16 r8, r4, r6, ror #8
947
3d0: 06eb007d uxtabeq r0, fp, sp
948
3d4: e6fc9075 uxtah r9, ip, r5
949
3d8: a68f387b sxtb16ge r3, fp, ror #16
950
3dc: 86af2076 sxtbhi r2, r6
951
3e0: c6bf3c7d sxthgt r3, sp, ror #24
952
3e4: e6cfc875 uxtb16 ip, r5, ror #16
953
3e8: e6efc875 uxtb ip, r5, ror #16
954
3ec: e6ff8875 uxth r8, r5, ror #16
955
3f0: 06bfaf34 reveq sl, r4
956
3f4: a6bf8fbc rev16ge r8, ip
957
3f8: e6ffef37 rbit lr, r7
958
3fc: c6ffdfb7 revshgt sp, r7
959
400: e719fe1d sdiv r9, sp, lr
960
404: e732f61c udiv r2, ip, r6
961
408: 47a20a51 sbfxmi r0, r1, #20, #3
962
40c: e7ee9852 ubfx r9, r2, #16, #15
963
410: 87dd1d9b bfihi r1, fp, #27, #3
964
414: e7d0339f bfc r3, #7, #10
965
418: e8060d6b stmda r6, {r0, r1, r3, r5, r6, r8, sl, fp}
966
41c: e80438df stmda r4, {r0, r1, r2, r3, r4, r6, r7, fp, ip, sp}
967
420: e810e1d4 ldmda r0, {r2, r4, r6, r7, r8, sp, lr, pc}
968
424: e83c9873 ldmda ip!, {r0, r1, r4, r5, r6, fp, ip, pc}
969
428: e8a931bd stmia r9!, {r0, r2, r3, r4, r5, r7, r8, ip, sp}
970
42c: e88b55c3 stm fp, {r0, r1, r6, r7, r8, sl, ip, lr}
971
430: e8bcbd22 ldm ip!, {r1, r5, r8, sl, fp, ip, sp, pc}
972
434: e8bda10a pop {r1, r3, r8, sp, pc}
973
438: e92b2219 stmdb fp!, {r0, r3, r4, r9, sp}
974
43c: e9298dd5 stmdb r9!, {r0, r2, r4, r6, r7, r8, sl, fp, pc}
975
440: e935605b ldmdb r5!, {r0, r1, r3, r4, r6, sp, lr}
976
444: e91191a7 ldmdb r1, {r0, r1, r2, r5, r7, r8, ip, pc}
977
448: e9ab6e6a stmib fp!, {r1, r3, r5, r6, r9, sl, fp, sp, lr}
978
44c: e98b4507 stmib fp, {r0, r1, r2, r8, sl, lr}
979
450: e9b053cc ldmib r0!, {r2, r3, r6, r7, r8, r9, ip, lr}
980
454: e9912de7 ldmib r1, {r0, r1, r2, r5, r6, r7, r8, sl, fp, sp}
981
458: 2a000075 bcs 634 <forth>
982
45c: 4bfffffe blmi 45c <near_post+0x184>
983
460: 612fff10 bxvs r0
984
464: e12fff33 blx r3
985
468: eafffffe b 468 <near_post+0x190>
986
46c: ebfffffe bl 46c <near_post+0x194>
987
470: 612fff10 bxvs r0
988
474: e12fff35 blx r5
989
478: da00006d ble 634 <forth>
990
47c: 4bfffffe blmi 47c <near_post+0x1a4>
991
480: 112fff19 bxne r9
992
484: e12fff3c blx ip
993
488: eafffedc b 0 <back>
994
48c: 8bfffffe blhi 48c <near_post+0x1b4>
995
490: 712fff11 bxvc r1
996
494: c12fff37 blxgt r7
997
498: aafffed8 bge 0 <back>
998
49c: 8bfffffe blhi 49c <near_post+0x1c4>
999
4a0: e12fff1c bx ip
1000
4a4: 312fff37 blxcc r7
1001
4a8: eafffffe b 4a8 <near_post+0x1d0>
1002
4ac: cbfffed3 blgt 0 <back>
1003
4b0: a12fff11 bxge r1
1004
4b4: e12fff30 blx r0
1005
4b8: eafffffe b 4b8 <near_post+0x1e0>
1006
4bc: eb00005c bl 634 <forth>
1007
4c0: c12fff1e bxgt lr
1008
4c4: 112fff3b blxne fp
1009
4c8: 2afffffe bcs 4c8 <near_post+0x1f0>
1010
4cc: ebfffffe bl 4cc <near_post+0x1f4>
1011
4d0: 212fff1a bxcs sl
1012
4d4: e12fff34 blx r4
1013
4d8: eafffec8 b 0 <back>
1014
4dc: ebfffffe bl 4dc <near_post+0x204>
1015
4e0: 312fff1c bxcc ip
1016
4e4: e12fff38 blx r8
1017
4e8: ea000051 b 634 <forth>
1018
4ec: ebfffffe bl 4ec <near_post+0x214>
1019
4f0: e12fff1a bx sl
1020
4f4: e12fff31 blx r1
1021
4f8: 4e042a06 vmlami.f32 s4, s8, s12
1022
4fc: ee052a45 vmls.f32 s4, s10, s10
1023
500: ee151a46 vnmla.f32 s2, s10, s12
1024
504: be134a04 vnmlslt.f32 s8, s6, s8
1025
508: 4e263a47 vnmulmi.f32 s6, s12, s14
1026
50c: ee310a00 vadd.f32 s0, s2, s0
1027
510: ee321a45 vsub.f32 s2, s4, s10
1028
514: 2e810a06 vdivcs.f32 s0, s2, s12
1029
518: ee030b06 vmla.f64 d0, d3, d6
1030
51c: ee010b45 vmls.f64 d0, d1, d5
1031
520: ee141b46 vnmla.f64 d1, d4, d6
1032
524: 1e110b01 vnmlsne.f64 d0, d1, d1
1033
528: 1e253b45 vnmulne.f64 d3, d5, d5
1034
52c: 3e320b04 vaddcc.f64 d0, d2, d4
1035
530: ee321b44 vsub.f64 d1, d2, d4
1036
534: 4e810b05 vdivmi.f64 d0, d1, d5
1037
538: eeb03ac3 vabs.f32 s6, s6
1038
53c: 5eb13a44 vnegpl.f32 s6, s8
1039
540: eeb10ac4 vsqrt.f32 s0, s8
1040
544: eeb00bc4 vabs.f64 d0, d4
1041
548: eeb11b44 vneg.f64 d1, d4
1042
54c: eeb10bc1 vsqrt.f64 d0, d1
1043
550: 5e00ea10 vmovpl s0, lr
1044
554: ee14ba10 vmov fp, s8
1045
558: bc4ebb11 vmovlt d1, fp, lr
1046
55c: ec557b15 vmov r7, r5, d5
1047
560: eeb04a46 vmov.f32 s8, s12
1048
564: 8eb01b42 vmovhi.f64 d1, d2
1049
568: 6eb72a00 vmovvs.f32 s4, #112 ; 0x70
1050
56c: eeb72b00 vmov.f64 d2, #112 ; 0x70
1051
570: eeb03a00 vmov.f32 s6, #0
1052
574: eeb01b00 vmov.f64 d1, #0
1053
578: ed952a1d vldr s4, [r5, #116] ; 0x74
1054
57c: 3d811a0e vstrcc s2, [r1, #56] ; 0x38
1055
580: 1d957b04 vldrne d7, [r5, #16]
1056
584: ed816b39 vstr d6, [r1, #228] ; 0xe4
1057
1058
00000588 <near_flt>:
1059
588: ed9f1a0e vldr s2, [pc, #56] ; 5c8 <near_post_flt>
1060
58c: ed8f3a0d vstr s6, [pc, #52] ; 5c8 <near_post_flt>
1061
590: bd1f2b04 vldrlt d2, [pc, #-16] ; 588 <near_flt>
1062
594: cd0f3b02 vstrgt d3, [pc, #-8] ; 594 <near_flt+0xc>
1063
598: 3d9f2a0a vldrcc s4, [pc, #40] ; 5c8 <near_post_flt>
1064
59c: ed8f0a09 vstr s0, [pc, #36] ; 5c8 <near_post_flt>
1065
5a0: cd9f4b08 vldrgt d4, [pc, #32] ; 5c8 <near_post_flt>
1066
5a4: ed0f0b09 vstr d0, [pc, #-36] ; 588 <near_flt>
1067
5a8: ed9f4a06 vldr s8, [pc, #24] ; 5c8 <near_post_flt>
1068
5ac: ed8f3a05 vstr s6, [pc, #20] ; 5c8 <near_post_flt>
1069
5b0: 5d1f4b0c vldrpl d4, [pc, #-48] ; 588 <near_flt>
1070
5b4: ed0f5b0d vstr d5, [pc, #-52] ; 588 <near_flt>
1071
5b8: 9d9f4a02 vldrls s8, [pc, #8] ; 5c8 <near_post_flt>
1072
5bc: 3d0f6a02 vstrcc s12, [pc, #-8] ; 5bc <near_flt+0x34>
1073
5c0: ed9f6b00 vldr d6, [pc] ; 5c8 <near_post_flt>
1074
5c4: bd0f1b01 vstrlt d1, [pc, #-4] ; 5c8 <near_post_flt>
1075
1076
000005c8 <near_post_flt>:
1077
5c8: ec912a01 vldmia r1, {s4}
1078
5cc: 2ca62a01 vstmiacs r6!, {s4}
1079
5d0: ecb91b08 vldmia r9!, {d1-d4}
1080
5d4: eca36b04 vstmia r3!, {d6-d7}
1081
5d8: 6d323a01 vldmdbvs r2!, {s6}
1082
5dc: ed267a01 vstmdb r6!, {s14}
1083
5e0: ed3d4b08 vldmdb sp!, {d4-d7}
1084
5e4: ed205b06 vstmdb r0!, {d5-d7}
1085
5e8: eeb41a41 vcmp.f32 s2, s2
1086
5ec: 7eb44ac4 vcmpevc.f32 s8, s8
1087
5f0: eeb40b46 vcmp.f64 d0, d6
1088
5f4: aeb43bc7 vcmpege.f64 d3, d7
1089
5f8: beb51a40 vcmplt.f32 s2, #0.0
1090
5fc: ceb57ac0 vcmpegt.f32 s14, #0.0
1091
600: eeb54b40 vcmp.f64 d4, #0.0
1092
604: eeb51bc0 vcmpe.f64 d1, #0.0
1093
608: 6ebd1ac3 vcvtvs.s32.f32 s2, s6
1094
60c: cebc3ac7 vcvtgt.u32.f32 s6, s14
1095
610: 3eb80ac1 vcvtcc.f32.s32 s0, s2
1096
614: 3eb81a42 vcvtcc.f32.u32 s2, s4
1097
618: 8ebd2bc4 vcvthi.s32.f64 s4, d4
1098
61c: 8ebc3bc6 vcvthi.u32.f64 s6, d6
1099
620: 9eb73bc7 vcvtls.f32.f64 s6, d7
1100
624: eeb83bc4 vcvt.f64.s32 d3, s8
1101
628: 0eb85b47 vcvteq.f64.u32 d5, s14
1102
62c: eeb74ac5 vcvt.f64.f32 d4, s10
1103
630: e120017a bkpt 0x001a
1104
*/
1105
1106
static const unsigned int insns[] =
1107
{
1108
0xe082852b, 0x009310c7, 0xe0290284, 0xc0329066,
1109
0xc04c000e, 0x00528364, 0xe069818d, 0x60708864,
1110
0xd08597a1, 0xe09d12c6, 0xc0adb0c7, 0xe0b80329,
1111
0xe0c392e6, 0x80dd1845, 0x30e28486, 0xe0f4a76d,
1112
0x118db785, 0xe19a9764, 0xe1cd90e5, 0xe1d20547,
1113
0xe086d777, 0x809c4776, 0x90265c57, 0xe035841d,
1114
0xe04c2055, 0x20539c17, 0xc06c9614, 0xe072811c,
1115
0xe08c4d1d, 0xe09b8d76, 0x10a20415, 0xe0beb256,
1116
0x80ca835e, 0xe0dc1615, 0x60e54a7e, 0xe0fc181d,
1117
0x61818076, 0xe19db577, 0xe1ce4216, 0xe1dba31d,
1118
0x828d8261, 0xe29ed69b, 0xe226e87d, 0xe2332f49,
1119
0xe24d46d9, 0xb25e1402, 0xe2650325, 0x3274882f,
1120
0xb2849102, 0xe2948902, 0x22aeac2a, 0xe2b6aabd,
1121
0xe2cc2426, 0xe2da85a5, 0xe2e6d871, 0x12fba6e9,
1122
0x638737ff, 0x03952951, 0x63c18eea, 0x33d2020a,
1123
0xe118028d, 0xe13601a7, 0xe15c0164, 0xb1750807,
1124
0xe112073e, 0x31300572, 0x915e0b37, 0x617a0b17,
1125
0xe3120585, 0x433e071b, 0xe355030e, 0x3377010a,
1126
0xe1a00b84, 0xe1b01484, 0xe1a001aa, 0xe1b00a2a,
1127
0xe1a015c9, 0x61b0254b, 0x31a08fe2, 0xe1b0946c,
1128
0xe1a0877e, 0xe1b0c473, 0xc1a0ce1d, 0xe1b0c61d,
1129
0xc1a00931, 0xc1b0bc33, 0xd1a0265c, 0xb1b0165a,
1130
0xe1a0a003, 0xe1b00009, 0x73a03e29, 0xe3b0497e,
1131
0xe1a0c1a6, 0x71b0554d, 0xe1a0137e, 0x01b0897c,
1132
0x330cbf31, 0x33429bf7, 0xd001059d, 0xe0100b9a,
1133
0xe0207c93, 0x0038639b, 0xe084e695, 0xe0940796,
1134
0xe0a08e9b, 0xe0b4b79e, 0x20c51796, 0x40db059c,
1135
0xe0498592, 0x0060ed94, 0x510d9054, 0x4125005c,
1136
0xe1473055, 0xe1649052, 0xe101658c, 0xe1006cca,
1137
0xe108e3a1, 0xe10176ed, 0xe1206483, 0xe12b7ec4,
1138
0xe14a0786, 0x914b3ec4, 0xe14b8ca3, 0xe14185e3,
1139
0x21220dac, 0xe12806ec, 0xa1620e86, 0xe16807cc,
1140
0x016a0ea3, 0xe1600de3, 0xe697a009, 0xe5d900c4,
1141
0xe1b4e0b6, 0xe1f96ed8, 0xe09120f1, 0xe6890004,
1142
0xe5e5305c, 0xe1c82ab0, 0xe79c8008, 0xe4dab010,
1143
0xe19ab0b6, 0xe1bb50da, 0xe19360f7, 0xe7ad7005,
1144
0xe5ca2000, 0xe08460b3, 0xe59ca000, 0xe4db4084,
1145
0xe09990bc, 0xe0d399d4, 0xe1f2b9f4, 0xe78db00b,
1146
0xe7cd100a, 0xe08ea0b9, 0xe7b36004, 0xe7f6400d,
1147
0xe09760ba, 0xe1b600db, 0xe096a0fd, 0xe783700c,
1148
0xe7e83001, 0xe1cc44b0, 0xe51f1008, 0xe55f7008,
1149
0xe15f21b0, 0xe15fa0d8, 0xe1dfe3f8, 0xe51f2008,
1150
0xe55f3008, 0xe1df72bc, 0xe15fd0d8, 0xe15fa2fc,
1151
0xe51f5008, 0xe5dfe01c, 0xe1df51b8, 0xe15f63dc,
1152
0xe15fb4f0, 0xe59f700c, 0xe5df5008, 0xe15fa4bc,
1153
0xe1df60d0, 0xe15f90f8, 0xe690036a, 0xe7d8348e,
1154
0xe78d5783, 0xe6c99145, 0xe7945360, 0xe7d0e4a4,
1155
0xe68c52cc, 0xe7e13667, 0xe7b26063, 0xe7fe8842,
1156
0xe7a363e6, 0xe7c83502, 0xe79db40e, 0xe7fda20c,
1157
0xe789d142, 0xe7eb774e, 0xe19bcf9f, 0xe1dc4f9f,
1158
0xe1fbbf9f, 0xe18e1f97, 0xe1c4cf96, 0x21e74f96,
1159
0xe6143f17, 0xe61a9f33, 0xe615cf56, 0xe615cf7e,
1160
0xe61a0f97, 0x66180ff2, 0x5624bf15, 0x6623bf3c,
1161
0xe6230f55, 0xe61caf75, 0x3626af9e, 0xe62baff7,
1162
0x56349f1e, 0xe63e1f37, 0x363b9f55, 0xa6313f7b,
1163
0xc635df97, 0xe6351ff7, 0xe654af17, 0x26591f37,
1164
0xe65dbf57, 0xe654bf7e, 0x365d2f97, 0xc65a8ffe,
1165
0xe66c3f1d, 0xe66d4f36, 0xe66a1f5e, 0xd66d2f7e,
1166
0xe66c1f95, 0xc6640ffd, 0x867a0f15, 0xd674bf37,
1167
0xa67e1f59, 0xe67b2f7e, 0xa6749f95, 0x867d2ffe,
1168
0xe683a877, 0x26a59c77, 0xe6b53477, 0xe6c48476,
1169
0x06eb007d, 0xe6fc9075, 0xa68f387b, 0x86af2076,
1170
0xc6bf3c7d, 0xe6cfc875, 0xe6efc875, 0xe6ff8875,
1171
0x06bfaf34, 0xa6bf8fbc, 0xe6ffef37, 0xc6ffdfb7,
1172
0xe719fe1d, 0xe732f61c, 0x47a20a51, 0xe7ee9852,
1173
0x87dd1d9b, 0xe7d0339f, 0xe8060d6b, 0xe80438df,
1174
0xe810e1d4, 0xe83c9873, 0xe8a931bd, 0xe88b55c3,
1175
0xe8bcbd22, 0xe8bda10a, 0xe92b2219, 0xe9298dd5,
1176
0xe935605b, 0xe91191a7, 0xe9ab6e6a, 0xe98b4507,
1177
0xe9b053cc, 0xe9912de7, 0x2a000075, 0x4bfffffe,
1178
0x612fff10, 0xe12fff33, 0xeafffffe, 0xebfffffe,
1179
0x612fff10, 0xe12fff35, 0xda00006d, 0x4bfffffe,
1180
0x112fff19, 0xe12fff3c, 0xeafffedc, 0x8bfffffe,
1181
0x712fff11, 0xc12fff37, 0xaafffed8, 0x8bfffffe,
1182
0xe12fff1c, 0x312fff37, 0xeafffffe, 0xcbfffed3,
1183
0xa12fff11, 0xe12fff30, 0xeafffffe, 0xeb00005c,
1184
0xc12fff1e, 0x112fff3b, 0x2afffffe, 0xebfffffe,
1185
0x212fff1a, 0xe12fff34, 0xeafffec8, 0xebfffffe,
1186
0x312fff1c, 0xe12fff38, 0xea000051, 0xebfffffe,
1187
0xe12fff1a, 0xe12fff31, 0x4e042a06, 0xee052a45,
1188
0xee151a46, 0xbe134a04, 0x4e263a47, 0xee310a00,
1189
0xee321a45, 0x2e810a06, 0xee030b06, 0xee010b45,
1190
0xee141b46, 0x1e110b01, 0x1e253b45, 0x3e320b04,
1191
0xee321b44, 0x4e810b05, 0xeeb03ac3, 0x5eb13a44,
1192
0xeeb10ac4, 0xeeb00bc4, 0xeeb11b44, 0xeeb10bc1,
1193
0x5e00ea10, 0xee14ba10, 0xbc4ebb11, 0xec557b15,
1194
0xeeb04a46, 0x8eb01b42, 0x6eb72a00, 0xeeb72b00,
1195
0xeeb03a00, 0xeeb01b00, 0xed952a1d, 0x3d811a0e,
1196
0x1d957b04, 0xed816b39, 0xed9f1a0e, 0xed8f3a0d,
1197
0xbd1f2b04, 0xcd0f3b02, 0x3d9f2a0a, 0xed8f0a09,
1198
0xcd9f4b08, 0xed0f0b09, 0xed9f4a06, 0xed8f3a05,
1199
0x5d1f4b0c, 0xed0f5b0d, 0x9d9f4a02, 0x3d0f6a02,
1200
0xed9f6b00, 0xbd0f1b01, 0xec912a01, 0x2ca62a01,
1201
0xecb91b08, 0xeca36b04, 0x6d323a01, 0xed267a01,
1202
0xed3d4b08, 0xed205b06, 0xeeb41a41, 0x7eb44ac4,
1203
0xeeb40b46, 0xaeb43bc7, 0xbeb51a40, 0xceb57ac0,
1204
0xeeb54b40, 0xeeb51bc0, 0x6ebd1ac3, 0xcebc3ac7,
1205
0x3eb80ac1, 0x3eb81a42, 0x8ebd2bc4, 0x8ebc3bc6,
1206
0x9eb73bc7, 0xeeb83bc4, 0x0eb85b47, 0xeeb74ac5,
1207
0xe120017a,
1208
};
1209
// END Generated code -- do not edit
1210
1211
// reset the detected cpu feature set
1212
VM_Version::features(detected_features);
1213
1214
{
1215
bool ok = true;
1216
unsigned int *insns1 = (unsigned int *)entry;
1217
for (unsigned int i = 0; i < sizeof insns / sizeof insns[0]; i++) {
1218
if (insns[i] != insns1[i]) {
1219
ok = false;
1220
printf("Ours:\n");
1221
Disassembler::decode((address)&insns1[i], (address)&insns1[i+1]);
1222
printf(" Raw: 0x%x\n", insns1[i]);
1223
printf("Theirs:\n");
1224
Disassembler::decode((address)&insns[i], (address)&insns[i+1]);
1225
printf(" Raw: 0x%x\n", insns[i]);
1226
printf("\n");
1227
}
1228
}
1229
assert(ok, "Assembler smoke test failed");
1230
}
1231
#endif // ASSERT
1232
}
1233
1234
#undef __
1235
void Address::AddressConstruct(Register base, RegisterOrConstant index, enum reg_op op,
1236
shift_op shift, enum wb_mode mode) {
1237
_base = base;
1238
_wb_mode = mode;
1239
_shift = shift;
1240
_target = 0;
1241
if (index.is_register()) {
1242
_acc_mode = reg;
1243
_index = index.as_register();
1244
_offset = 0;
1245
_as_op = op;
1246
} else {
1247
assert(shift == lsl(), "should be");
1248
assert(index.is_constant(), "should be");
1249
_acc_mode = imm;
1250
// _index = no_reg;
1251
_offset = index.as_constant();
1252
if(SUB == _as_op)
1253
_offset = -_offset;
1254
}
1255
}
1256
1257
// Fill in the addressing-mode fields (P/U/W bits, base register, and the
// offset or index encoding) of a partially-built load/store instruction.
// pc is the address where the instruction will reside; sec is used to
// register a relocation for literal (pc-relative) addresses.
void Address::encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const {
  long offset = _offset;
  access_mode mode = _acc_mode;

  if(lit == mode) {
    //Create the offset from the address
    offset = _target - pc;
    mode = imm;
  }

  //Correct the offset if the base is the PC
  // (the ARM PC reads as the instruction address plus 8)
  if(r15_pc == _base && imm == mode) {
    offset -= 8;
  }

  // U (bit 23): add (1) or subtract (0) the offset/index from the base.
  int U = (offset >= 0 && _acc_mode == imm) || (_as_op == ADD && _acc_mode == reg);
  // P (bit 24): pre-indexed or plain offset addressing (vs. post-indexed).
  int P = pre == _wb_mode || off == _wb_mode;
  // W (bit 21): write the computed address back into the base register.
  int W = pre == _wb_mode;
  i->f(P, 24), i->f(U, 23), i->f(W, 21), i->rf(_base, 16);

  // Only the magnitude is encoded; the sign went into U above.
  offset = offset < 0 ? -offset : offset;
  // Bits 27..25 were already set by the instruction emitter and identify
  // the load/store class, which dictates the offset/index field layout.
  int opc = i->get(27, 25);

  if (imm == mode) {
    switch(opc) {
    case 0b010:
      // LDR, LDRB
      // STR, STRB
      // 12-bit unsigned immediate in bits 11..0.
      i->f(offset, 11, 0);
      break;
    case 0b000:
      // LDRH, LDRSH, LDRSB, LDRD
      // STRH, STRD
      // Immediate form flag (bit 22) plus an 8-bit immediate split across
      // bits 11..8 (high nibble) and 3..0 (low nibble).
      i->f(1, 22);
      assert(offset < (1 << 8), "Offset larger than a byte");
      i->f(offset & 0xF, 3, 0);
      i->f(offset >> 4, 11, 8);
      break;
    default:
      ShouldNotReachHere();
    }
  } else if (reg == mode) {
    assert(r15_pc->encoding_nocheck() !=
           _base->encoding_nocheck(), "Remove this if you have your offsets right");
    switch(opc) {
    case 0b010:
      // LDR, LDRB
      // STR, STRB
      //Need to set bit 25 as Register 0b011
      i->f(1, 25);
      // Immediate-shifted index register: shift amount, shift kind, index.
      i->f(_shift.shift(), 11, 7);
      i->f(_shift.kind(), 6, 5);
      i->f(0, 4);
      i->rf(_index, 0);
      break;
    case 0b000:
      // LDRH, LDRSH, LDRSB, LDRD
      // STRH, STRD
      //Need to clear bit 22 as Register
      i->f(0, 22);
      // This instruction class has no shifted-index form.
      assert(_shift == lsl(), "Type of load/store does not support shift");
      i->f(0b0000, 11, 8);
      i->rf(_index, 0);
      break;
    default:
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }

  // Literal addresses need a relocation so the embedded pc-relative
  // offset can be fixed up if the code moves.
  if(lit == _acc_mode) {
    sec->relocate(pc, _rspec);
  }
}
1332
1333
// Fill in the addressing fields of a VLDR/VSTR instruction. Only the
// immediate form is supported: a sign bit (U) plus an 8-bit word offset.
// sec and pc are unused here (no literal/relocation handling yet).
void Address::fp_encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const {
  // ATM works only for immediate
  assert(_wb_mode == off, "Can't do pre or post addressing for vldr, vstr");
  long offset = _offset;
  if(imm == _acc_mode) {
    if(r15_pc == _base) {
      //Correct the offset if the base is the PC
      // (the ARM PC reads as the instruction address plus 8)
      offset -= 8;
    }
    // U (bit 23) selects add/subtract; the immediate encodes the offset
    // magnitude in words, so the offset must be 4-byte aligned.
    bool U = offset >= 0;
    assert(0 == (offset & 3), "Can only access aligned data");
    unsigned imm8 = uabs(offset) / 4;
    i->f(U, 23), i->rf(_base, 16), i->f(imm8, 7, 0);
  } else {
    ShouldNotReachHere();
  }
}
1350
1351
#define __ as->
1352
// Materialize the effective address described by this Address into
// register r, emitting code through the given MacroAssembler.
void Address::lea(MacroAssembler *as, Register r) const {
  Relocation* reloc = _rspec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();

  //TODO Potentially remove this - added as aarch64 doesn't contain
  // any method of handling pre or post
  assert( _wb_mode != pre && _wb_mode != post, "Wrong wb mode");
  // could probably permit post however

  if (imm == _acc_mode) {
    // Base plus immediate. Skip the arithmetic entirely when it would be
    // a no-op (zero offset into the destination register itself).
    if (!(_offset == 0 && _base == r)) {
      if (_offset > 0)
        __ add(r, _base, _offset);
      else
        __ sub(r, _base, -_offset);
    }
  } else if (reg == _acc_mode) {
    // Base plus (possibly shifted) index register.
    __ add(r, _base, _index, _shift);
  } else if (lit == _acc_mode) {
    // Literal target: load the address value itself; use the patchable
    // movptr form when a relocation is attached.
    if (rtype == relocInfo::none)
      __ mov(r, target());
    else
      __ movptr(r, (uint32_t)target());
  } else {
    ShouldNotReachHere();
  }
}
1385
#undef __
1386
1387
#define __ as->
1388
class Address;
1389
1390
// Adapts given Address to the capabilities of instructions respective to the
1391
// provided data type. E.g. some of the instructions cannot use index register
1392
// while others cannot have an offset field.
1393
// Returns a copy of this Address if it's good, or constructs a new Address
1394
// good for respective instructions by emitting necessary code to calculate
1395
// the address in tmp register
1396
// Return an Address usable by instructions operating on the given data
// type: this Address when it already fits, otherwise a plain [tmp]
// Address after computing the full effective address into tmp.
Address Address::safe_for(InsnDataType type, MacroAssembler *as, Register tmp) {
  if (!is_safe_for(type)) {
    // Compute the address into the scratch register and go through it.
    assert(tmp->is_valid(), "must be");
    lea(as, tmp);
    return Address(tmp);
  }
  return *this;
}
1403
#undef __
1404
1405
// Whether this Address can be encoded directly by instructions operating
// on the given data type. Immediate and literal forms are limited by the
// reachable offset range; register forms by the shifts the corresponding
// instruction encoding permits.
bool Address::is_safe_for(InsnDataType type) {
  if (imm == _acc_mode || lit == _acc_mode)
    return offset_ok_for_immed(_offset, type);
  if (reg == _acc_mode)
    return shift_ok_for_index(_shift, type);
  // no_mode and anything else is a programming error.
  ShouldNotReachHere();
  return false;
}
1418
1419
bool Address::offset_ok_for_immed(long offset, InsnDataType type) {
1420
const int o = offset < 0 ? -offset : offset;
1421
switch (type) {
1422
case IDT_INT:
1423
case IDT_BOOLEAN:
1424
case IDT_OBJECT:
1425
case IDT_ADDRESS:
1426
case IDT_METADATA:
1427
case IDT_ARRAY:
1428
return o <= 0xfff;
1429
case IDT_BYTE:
1430
case IDT_SHORT:
1431
case IDT_LONG:
1432
case IDT_CHAR:
1433
return o <= 0xff;
1434
case IDT_FLOAT:
1435
case IDT_DOUBLE:
1436
return !(o & ~0x3fc);
1437
case IDT_LEA:
1438
return true;
1439
case IDT_MULTIWORD:
1440
return !o;
1441
default:
1442
ShouldNotReachHere();
1443
return false;
1444
}
1445
}
1446
1447
// Whether an index register with the given shift is usable by loads and
// stores of the given data type.
bool Address::shift_ok_for_index(shift_op shift, InsnDataType type) {
  switch (type) {
  case IDT_LEA:
    // Address computation: every shifted-register form is available.
    return true;
  case IDT_INT:
  case IDT_BOOLEAN:
  case IDT_OBJECT:
  case IDT_ADDRESS:
  case IDT_METADATA:
  case IDT_ARRAY:
    // LDR/STR class allows an immediate-shifted (not register-shifted)
    // index register.
    return !shift.is_register();
  case IDT_BYTE:
  case IDT_SHORT:
  case IDT_LONG:
  case IDT_CHAR:
    // LDRH/LDRSB class allows an index register but no shift at all.
    return !shift.is_register() && 0 == shift.shift();
  case IDT_FLOAT:
  case IDT_DOUBLE:
  case IDT_MULTIWORD:
    // VLDR/VSTR and LDM/STM cannot use an index register.
    return false;
  default:
    ShouldNotReachHere();
    return false;
  }
}
1472
1473
void Assembler::emit_data64(jlong data,
1474
relocInfo::relocType rtype,
1475
int format) {
1476
if (rtype == relocInfo::none) {
1477
emit_int64(data);
1478
} else {
1479
emit_data64(data, Relocation::spec_simple(rtype), format);
1480
}
1481
}
1482
1483
// Emit a 64-bit data word annotated with the given relocation spec.
// Must be called inside an InstructionMark so the relocation can be
// attached to the enclosing instruction's start.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {

  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
  emit_int64(data);
}
1493
1494
extern "C" {
1495
void das(uint64_t start, int len) {
1496
ResourceMark rm;
1497
len <<= 2;
1498
if (len < 0)
1499
Disassembler::decode((address)start + len, (address)start);
1500
else
1501
Disassembler::decode((address)start, (address)start + len);
1502
}
1503
1504
JNIEXPORT void das1(unsigned long insn) {
1505
das(insn, 1);
1506
}
1507
}
1508
1509
#define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use)
1510
1511
// Compute the pc-relative address adr into Rd. The ARM PC reads as the
// instruction address plus 8, hence the correction in the offset.
void Assembler::adr(Register Rd, address adr, Condition cond) {
  const int rel = adr - (pc() + 8);
  adr_encode(Rd, rel, cond);
}
1515
1516
#undef starti
1517
1518
// Build a literal (pc-relative) Address for the given target, attaching
// the relocation spec appropriate to the relocation type.
Address::Address(address target, relocInfo::relocType rtype)
  : _acc_mode(lit), _base(sp), _offset(0), _wb_mode(off) {
  //TODO we don't complete _wb_mode - what about Addresses that are pre/post accessed?
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    // No relocation wanted: leave the holder empty.
    _rspec = RelocationHolder::none;
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
1558
1559
// Emit an adr of |dest|'s target into r, recording dest's relocation at
// the current pc so the literal can be patched.
void Assembler::adr(Register r, const Address &dest, Condition cond) {
  code_section()->relocate(pc(), dest.rspec());
  // Forward the caller's condition. The original dropped |cond|, emitting
  // an unconditional adr even when a condition was requested.
  adr(r, dest.target(), cond);
}
// Emit an unconditional branch to L. A bound label branches straight to
// its resolved address; an unbound one records a patch site and encodes
// the current pc as a placeholder to be fixed up when L is bound.
void Assembler::wrap_label(Label &L, Assembler::uncond_branch_insn insn) {
  address dest;
  if (L.is_bound()) {
    dest = target(L);
  } else {
    L.add_patch_at(code(), locator());
    dest = pc();
  }
  (this->*insn)(dest);
}
// Emit a conditional branch to L. A bound label branches to its resolved
// address; an unbound one records a patch site and encodes the current pc
// as a placeholder.
void Assembler::wrap_label(Label &L, Condition cond,
                           Assembler::cond_branch_insn insn) {
  address dest;
  if (L.is_bound()) {
    dest = target(L);
  } else {
    L.add_patch_at(code(), locator());
    dest = pc();
  }
  (this->*insn)(dest, cond);
}
// Emit a conditional load/store of register r addressed by label L,
// patching later if L is not yet bound.
void Assembler::wrap_label(Register r, Label &L, Condition cond,
                           Assembler::cond_ldst_insn insn) {
  address dest;
  if (L.is_bound()) {
    dest = target(L);
  } else {
    L.add_patch_at(code(), locator());
    dest = pc();
  }
  (this->*insn)(r, dest, cond);
}
// FP variant: emit a conditional load/store of FP register r addressed by
// label L, patching later if L is not yet bound.
void Assembler::wrap_label(FloatRegister r, Label &L, Condition cond,
                           Assembler::cond_fp_ldst_insn insn) {
  address dest;
  if (L.is_bound()) {
    dest = target(L);
  } else {
    L.add_patch_at(code(), locator());
    dest = pc();
  }
  (this->*insn)(r, dest, cond);
}
// Encode |imm| as an ARM "modified immediate": an 8-bit value rotated
// right by an even amount, packed as rotate/2 in bits 11..8 and the value
// in bits 7..0. Caller must have checked is_valid_for_imm12.
uint32_t Assembler::encode_imm12(int imm) {
  assert(is_valid_for_imm12(imm),
         "only valid immediates allowed, call is_valid_for_imm12 first");
  uint32_t n = imm;
  // Fits in 8 bits: rotation 0.
  if ((n & 0xFFFFFF00) == 0) {
    return n;
  }
  // Value sits wholly below bit 26: rotate right by (32 - lshift), where
  // lshift is the trailing-zero count rounded down to even.
  // (x << 7) here equals ((x/2) << 8), i.e. the rotate field.
  if ((n & 0xFC000000) == 0) {
    const int lshift = __builtin_ctz(n) & 0xFFFFFFFE;
    return ((32 - lshift) << 7) | (n >> lshift);
  }
  // Value wraps around the word boundary: swap halves and retry.
  n = (n << 16) | (n >> 16);
  const int lshift = __builtin_ctz(n) & 0xFFFFFFFE;
  return ((16 - lshift) << 7) | (n >> lshift);
}
// Invert encode_imm12: the low 8 bits hold a value rotated right by twice
// the 4-bit rotation field (bits 11..8).
int Assembler::decode_imm12(uint32_t imm12) {
  assert((imm12 & 0xFFFFF000) == 0, "bad imm12");
  uint32_t shift = (imm12 & 0x00000F00) >> 7;
  uint32_t value = imm12 & 0x000000FF;
  if (shift == 0) {
    // Rotation of zero: return the value directly. This also avoids
    // "value << (32 - 0)", a 32-bit shift that is undefined behavior.
    return (int) value;
  }
  return (int) ((value >> shift) | (value << (32 - shift)));
}
bool Assembler::is_valid_for_imm12(int imm) {
1626
uint32_t n = (uint32_t) imm;
1627
uint32_t shift = __builtin_clz(n) & 0xFFFFFFFE;
1628
uint32_t result = n << shift;
1629
if ((result & 0x00FFFFFF) == 0) {
1630
return true;
1631
}
1632
n = (n << 16) | (n >> 16);
1633
shift = __builtin_clz(n) & 0xFFFFFFFE;
1634
result = n << shift;
1635
if ((result & 0x00FFFFFF) == 0) {
1636
return true;
1637
}
1638
return false;
1639
}
1640
1641
bool Assembler::operand_valid_for_logical_immediate(bool is32, uint64_t imm) {
1642
return is32 && is_valid_for_imm12(imm);
1643
}
1644
1645
// add/sub takes the same rotated-8-bit immediate form as other
// data-processing instructions.
bool Assembler::operand_valid_for_add_sub_immediate(int imm) {
  return is_valid_for_imm12(imm);
}
// unsigned long overload; delegates to the imm12 check (value is
// narrowed to the 32-bit immediate the encoder sees).
bool Assembler::operand_valid_for_add_sub_immediate(unsigned long imm) {
  return is_valid_for_imm12(imm);
}
// unsigned overload; delegates to the imm12 check.
bool Assembler::operand_valid_for_add_sub_immediate(unsigned imm) {
  return is_valid_for_imm12(imm);
}
// 64-bit overload: valid only if both 32-bit halves encode as imm12
// (64-bit add/sub is performed as a pair of 32-bit operations).
bool Assembler::operand_valid_for_add_sub_immediate(jlong imm) {
  return is_valid_for_imm12(imm >> 32) && is_valid_for_imm12(imm);
}
// n.b. this is implemented in subclass MacroAssembler
// (this base-class stub must never be reached).
void Assembler::bang_stack_with_offset(int offset) { Unimplemented(); }
// Byte used to pad unused space in the code buffer.
int AbstractAssembler::code_fill_byte() {
  return 0;
}
// Load a 32-bit immediate into dst using the cheapest available encoding:
// mov/mvn with a rotated 8-bit immediate, movw/movt where the CPU has
// them, or a full 32-bit expansion. |s| requests that flags be set.
void Assembler::mov_immediate(Register dst, uint32_t imm32, Condition cond, bool s) {
#ifndef PRODUCT
  {
    char buffer[64];
    // Space before PRIX32 is required: "literal"MACRO is parsed as a
    // user-defined literal since C++11.
    snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
    block_comment(buffer);
  }
#endif
  if (is_valid_for_imm12(imm32)) {
    if (s) movs_i(dst, (unsigned)imm32, cond);
    else   mov_i (dst, (unsigned)imm32, cond);
  } else if (is_valid_for_imm12(~imm32)) {
    // The complement encodes; use move-negated instead.
    if (s) mvns_i(dst, (unsigned)~imm32, cond);
    else   mvn_i (dst, (unsigned)~imm32, cond);
  } else if (!s && (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) &&
             (imm32 < (1 << 16))) {
    // Fits in 16 bits: a single movw (movw cannot set flags, hence !s).
    movw_i(dst, (unsigned)imm32, cond);
  } else if (!s && (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) &&
             !(imm32 & ((1 << 16) - 1))) {
    // Low half is zero: movw #0 then movt with the high half.
    movw_i(dst, (unsigned)0, cond);
    movt_i(dst, (unsigned)(imm32 >> 16), cond);
  } else { // TODO Could expand to varied numbers of mov and orrs
    // Need to do a full 32 bits
    mov_immediate32(dst, imm32, cond, s);
  }
}
//This should really be in the macroassembler
1696
void Assembler::mov_immediate32(Register dst, uint32_t imm32, Condition cond, bool s)
1697
{
1698
// Need to move a full 32 bit immediate, for example if we're loading an address that
1699
// might change later and therefore need to be updated.
1700
if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
1701
//Use a movw and a movt
1702
Assembler::movw_i(dst, (unsigned)(imm32 & 0xffff), cond);
1703
Assembler::movt_i(dst, (unsigned)(imm32 >> 16), cond);
1704
if(s) {
1705
//Additionally emit a cmp instruction
1706
Assembler::cmp(dst, 0);
1707
}
1708
} else {
1709
// Sadly we don't have movw, movt
1710
// instead emit a mov and three orr
1711
mov_i(dst, imm32 & (0xff ), cond);
1712
orr(dst, dst, imm32 & (0xff << 8 ), cond);
1713
orr(dst, dst, imm32 & (0xff << 16), cond);
1714
if(s) orrs(dst, dst, imm32 & (0xff << 24), cond);
1715
else orr (dst, dst, imm32 & (0xff << 24), cond);
1716
}
1717
}
1718
1719
#define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use)
// Emit the ADD/SUB-family data-processing op |decode| as Rd = Rn op imm.
// If the immediate does not encode directly, try the complementary opcode
// with the negated immediate; as a last resort materialize |imm| in Rd and
// emit the register form.
void Assembler::add_sub_imm(int decode, Register Rd, Register Rn, int imm,
                            Condition cond, bool s) {
  // cpart is the opcode that computes the same result from -imm.
  int cpart = 0;
  switch(decode) {
  case 0b0100: cpart = 0b0010; break; // ADD -> SUB
  case 0b0010: // SUB -> ADD
  case 0b0011: cpart = 0b0100; break; // RSB -> ADD
  case 0b0101: cpart = 0b0110; break; // ADC -> SUBC
  case 0b0110: // SUBC -> ADC
  case 0b0111: cpart = 0b0101; break; // RSC -> ADC
  default: ShouldNotReachHere();
  }
  //try both possible imm_instrs
  if(imm_instr(decode, Rd, Rn, imm, cond, s)) return;
  if(imm_instr(cpart, Rd, Rn, -imm, cond, s)) return;

  //Try plan B - a mov first - need to have destination that is not an arg
  assert(Rd != Rn, "Can't use imm and can't do a mov. I'm in a jam.");
  mov_immediate(Rd, (uint32_t)uabs(imm), cond, s);
  //Now do the non immediate version - copied from the immediate encodings
  {
    starti;
    // Pick the complementary opcode when the magnitude was negated above.
    reg_instr( imm < 0 ? cpart : decode, lsl(), cond, s);
    rf(Rn, 16), rf(Rd, 12), rf(Rd, 0);
  }
}
// Emit a VFP vmov of an 8-bit encoded FP immediate into Rd. |imm| is the
// imm8 produced by encode_float_fp_imm/encode_double_fp_imm, split into
// the encoding's high (bits 19..16) and low (bits 3..0) nibbles.
void Assembler::vmov_imm(FloatRegister Rd, unsigned imm, bool is64bit,
                         Condition cond) {
  starti;
  fp_instr_base(is64bit, cond);
  f(0b1011, 23, 20);
  // double register passed (see 'd0'-'dN' encoding), not reencode it's number
  fp_rencode(Rd, false, 12, 22);
  f(0b0000, 7, 4);
  f(imm & 0xf, 3, 0);   // low nibble of imm8
  f(imm >> 4, 19, 16);  // high nibble of imm8
}
// Zero a double FP register with an AdvSIMD integer vmov (see notes
// below); only valid for double registers, and only unconditionally.
void Assembler::vmov_imm_zero(FloatRegister Rd, bool is64bit,
                              Condition cond) {
  // Note that this is not a floating point vmov but instead
  // an integer vmov from the SIMD instructions.
  // cannot be conditional.
  assert(operand_valid_for_double_immediate(0), "operand should be valid for immediate");
  assert(is64bit, "SIMD loading available only for double registers");
  assert(cond == C_DFLT, "Unable to vmov #0 conditionally");
  //int cmod = is64bit? 0b1110 : 0b0000; // ? I64 : I32
  int cmod = 0b1110;
  {
    starti;
    f(0b1111001, 31, 25);
    f(0, 24); // imm1
    f(0b10000, 23, 19);
    // double register passed (see 'd0'-'dN' encoding), not reencode it's number
    fp_rencode(Rd, false, 12, 22);
    f(0b000, 18, 16); //imm3
    f(cmod, 11, 8);
    f(0b00, 7, 6);
    f(is64bit, 5);
    f(1, 4);
    f(0b0000, 3, 0); //imm4
  }
}
// Can |v| be encoded as a VFPv3 8-bit float immediate? Requires VFPv3.
bool Assembler::operand_valid_for_float_immediate(float v) {
  if (!(VM_Version::features() & FT_VFPV3)) {
    return false;
  }
  // Reinterpret the float's bit pattern (union type punning, used
  // throughout this file).
  union ufloat {
    float f;
    uint32_t u;
  } imm;
  unsigned tmp;
  imm.f = v;

  // The low 19 fraction bits must be zero - only the top 4 fraction bits
  // are representable in the imm8 form.
  if (imm.u & ((1 << 19) - 1))
    return false;

  // Top 6 exponent bits must be 0b100000 or 0b011111, i.e. within the
  // narrow biased-exponent range the imm8 form can express.
  tmp = (imm.u >> 25) & ((1 << 6) - 1);
  return tmp == 32 || tmp == 31;
}
// Can |v| be encoded as a VFPv3 8-bit double immediate? Additionally
// accepts +0.0 when AdvSIMD is present (materialized by vmov_imm_zero).
bool Assembler::operand_valid_for_double_immediate(double v) {
  if (!(VM_Version::features() & FT_VFPV3)) {
    return false;
  }
  // Reinterpret the double's bit pattern.
  union ufloat {
    double f;
    uint64_t u;
  } imm;
  unsigned tmp;
  imm.f = v;

  // +0.0 is loadable via a SIMD integer vmov.
  if ((VM_Version::features() & FT_AdvSIMD) && imm.u == 0)
    return true;

  // The low 48 bits must be zero - only the top 4 fraction bits are
  // representable in the imm8 form.
  if (imm.u & (uint64_t) 0xffffffffffffLL)
    return false;

  imm.u >>= 48;

  // Exponent must fall in the narrow biased range the imm8 form allows.
  tmp = (imm.u >> 6) & ((1 << 9) - 1);
  return tmp == 0x100 || tmp == 0xff;
}
// Pack |imm_f| into the VFP 8-bit immediate form: bit 7 = sign ('a'),
// bits 6..0 = exponent/fraction bits ('bcdefgh'). Caller must have
// checked operand_valid_for_float_immediate.
unsigned Assembler::encode_float_fp_imm(float imm_f) {
  assert(operand_valid_for_float_immediate(imm_f), "operand should be valid for immediate");
  union ufloat {
    float f;
    uint32_t u;
  } imm;
  unsigned tmp, imm8;
  imm.f = imm_f;

  assert(!(imm.u & ((1 << 19) - 1)), "Invalid float imm");
  tmp = (imm.u >> 25) & ((1 << 6) - 1);
  assert(tmp == 32 || tmp == 31, "Invalid float imm");

  imm8 = (imm.u >> 24) & 0x80; // set a
  imm8 |= (imm.u >> 19) & 0x7F; // set bcdefgh
  return imm8;
}
// Pack |imm_f| into the VFP 8-bit immediate form for doubles: bit 7 =
// sign ('a'), bits 6..0 = exponent/fraction bits ('bcdefgh'). Caller must
// have checked operand_valid_for_double_immediate.
unsigned Assembler::encode_double_fp_imm(double imm_f) {
  assert(operand_valid_for_double_immediate(imm_f), "operand should be valid for immediate");
  union ufloat {
    double f;
    uint64_t u;
  } imm;
  unsigned tmp, imm8;
  imm.f = imm_f;

  // Only the top 16 bits carry encodable information.
  assert(!(imm.u & (uint64_t)0xffffffffffffLL), "Invalid float imm");
  imm.u >>= 48;

  tmp = (imm.u >> 6) & ((1 << 9) - 1);
  assert(tmp == 0x100 || tmp == 0xff, "Invalid float imm");

  imm8 = (imm.u >> 8) & 0x80; // set a
  imm8 |= imm.u & 0x7F; // set bcdefgh
  return imm8;
}
// Population count of |val|. The file already relies on GCC builtins
// (__builtin_ctz/__builtin_clz), so use the popcount builtin rather than
// a hand-rolled shift-and-test loop.
unsigned Assembler::count_bits(unsigned val) {
  return __builtin_popcount(val);
}
// Can the registers in |regset| be transferred to/from |adr| with a
// single ldm/stm? Requires an immediate-mode address whose base register
// is not itself in the set, and an offset/writeback combination that
// matches one of the ldm/stm addressing modes.
bool Assembler::can_ldst_multiple( unsigned regset, const Address& adr) {
  int nbits = count_bits(regset);
  return adr.get_mode() == Address::imm &&
    !(adr.base()->bit() & regset) && // FIXME, this could be relaxed
    // pre/off: offset 0, +1 word, or -nbits words (descending store)
    (((adr.offset() == 0 || adr.offset() == wordSize || adr.offset() == -nbits * wordSize) &&
      (adr.get_wb_mode() == Address::pre || adr.get_wb_mode() == Address::off)) ||
     // post: offset 0, -1 word, or +nbits words (ascending load)
     ((adr.offset() == 0 || adr.offset() == -wordSize || adr.offset() == nbits * wordSize) &&
      adr.get_wb_mode() == Address::post));
}
// Emit the shared fields of a VFP single-register load/store: |decode|
// supplies opcode bits 24..20, |is64bit| selects single vs double
// precision, and the Address encodes the base/offset fields.
// NOTE(review): unlike the other emitters there is no 'starti' here; this
// appears to rely on 'current' having been set up by the caller - confirm
// before calling directly.
void Assembler::fp_ldst_instr(int decode, bool is64bit, const Address& adr,
                              Condition cond) {
  f(cond, 31, 28), f(0b110, 27, 25), f(decode, 24, 20);
  f(0b101, 11, 9), f(is64bit, 8);
  adr.fp_encode(current, code_section(), pc());
}
// Emit a vldm/vstm transferring the consecutive run of FP registers in
// |regset| (a bitmask) to/from memory at Rn, with the increment/decrement
// and writeback behavior selected by |mode|.
void Assembler::fp_ldst_mul(Register Rn, int regset, bool load, bool is64bit,
                            enum fp_mode mode, Condition cond) {
  starti;
  bool P = db_wb == mode;                // P: decrement-before form
  bool U = ia_wb == mode || ia == mode;  // U: upward (increment) direction
  bool W = ia_wb == mode || db_wb == mode; // W: write base register back
  // Encode registers
  unsigned i, fp_first_reg, nregs = 1;
  bool enc_z = false;
  // Locate the lowest-numbered register in the set; it is the base of the run.
  for(fp_first_reg = 0; !(regset & 1); regset >>= 1, fp_first_reg++);
  FloatRegister Rd = (FloatRegister) fp_first_reg;
  // Count the run; a set bit after a gap means the set is not consecutive.
  for(i = 0; i + fp_first_reg < 8 * sizeof(int); i++) {
    regset >>= 1;
    if(regset & 1) {
      assert(!enc_z, "Unable to encode non-consecutive registers in fp_ldst_mul");
      nregs++;
    } else {
      enc_z = true;
    }
  }
  assert(!is64bit || nregs <= 16, "Too many registers in a set");
  f(cond, 31, 28), f(0b110, 27, 25); f(P, 24), f(U, 23), f(W, 21), f(load, 20);
  // vstm/vstm uses double register number, not it's encoding. Should reencode it.
  rf(Rn, 16), fp_rencode(Rd, is64bit, 12, 22), f(0b101, 11, 9), f(is64bit, 8);
  // imm8 counts 32-bit words: two per double register.
  f(is64bit ? nregs * 2 : nregs, 7, 0);
}
void Assembler::simd_ld(FloatRegister Rd, unsigned type, unsigned size, unsigned num_regs,
1915
const Address &addr, enum SIMD_Align align) {
1916
starti;
1917
assert(addr.get_mode() == Address::imm &&
1918
(addr.get_wb_mode() == Address::off && addr.offset() == 0) ||
1919
(addr.get_wb_mode() == Address::post && addr.offset() == long(8*num_regs)), "Unsupported");
1920
assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
1921
if (addr.get_wb_mode() == Address::post)
1922
f(0b1111, 31, 28), f(0b0100, 27, 24), f(0, 23), f(0b10, 21, 20);
1923
rf(addr.base(), 16), fp_rencode(Rd, false, 12, 22), f(type, 11, 8), f(size, 7, 6);
1924
f((unsigned)align, 5, 4), f(addr.get_wb_mode() == Address::post ? 0b1101 : 0b1111, 3, 0);
1925
}
1926
1927
// Emit a vmov between core register Rt and an element of Dd selected by
// |index| (addressed with |index_bits| bits). |bit20| and |opc| come from
// the specific-instruction wrappers; opc's bits are scattered into the
// encoding fields below. NOTE(review): bit20 presumably selects transfer
// direction - confirm against the callers.
void Assembler::simd_vmov(FloatRegister Dd, unsigned index, Register Rt, bool advsimd,
                          unsigned index_bits, unsigned bit20, unsigned opc, Condition cond) {
  starti;
  assert(index < (1u<<index_bits), "Illegal element index");
  assert(!advsimd || (VM_Version::features() & FT_AdvSIMD), "SIMD coprocessor required");
  // Fold the element index into the unused low bits of opc.
  opc |= index << (3 - index_bits);
  f(cond, 31, 28), f(0b1110, 27, 24), f((opc>>2)&3, 22, 21), f(bit20, 20);
  fp_rencode(Dd, false, 16, 7), f(opc>>4, 23);
  rf(Rt, 12), f(0b1011, 11, 8), f(opc & 3, 6, 5), f(0b10000, 4, 0);
}
// Emit a SIMD veor: Dd = Dn ^ Dm. |q| selects the quad (128-bit) form,
// which the assert requires to use suitably aligned register numbers.
void Assembler::simd_eor(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm, unsigned q) {
  starti;
  assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
  assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
  f(0b111100110, 31, 23), f(0b00, 21, 20), fp_rencode(Dd, false, 12, 22);
  fp_rencode(Dn, false, 16, 7), f(0b0001, 11, 8), fp_rencode(Dm, false, 0, 5), f(q, 6), f(1, 4);
}
// Emit a SIMD multiply-family instruction on Dd, Dn, Dm. The flag bits
// (|bit24|, |bit9|, |mul|, |bit6|) and element |size| are supplied by the
// instruction-specific wrappers and spliced directly into the encoding.
void Assembler::simd_vmul(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm,
                          unsigned bit24, unsigned bit9, unsigned size, unsigned mul, unsigned bit6) {
  starti;
  assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
  f(0b1111001, 31, 25), f(bit24, 24), f(size, 21, 20), fp_rencode(Dd, false, 12, 22);
  f(mul^1, 23), fp_rencode(Dn, false, 16, 7), f(1, 11), f(mul^1, 10), f(bit9, 9);
  f(mul, 8), f(bit6, 6), f(mul, 4), fp_rencode(Dm, false, 0, 5);
}
// Emit a SIMD vuzp (unzip) of Dd and Dm with element |size|. |q| selects
// the quad form, which requires suitably aligned register numbers.
void Assembler::simd_vuzp(FloatRegister Dd, FloatRegister Dm, unsigned size, unsigned q) {
  starti;
  assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
  assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
  f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20), f(size, 19, 18);
  f(0b10, 17, 16), f(0b00010, 11, 7), f(q, 6), f(0, 4), fp_rencode(Dm, false, 0, 5);
}
// Emit a SIMD immediate shift of Dm into Dd. |imm| is the shift amount,
// |size| the log2 element width, |encode| the opcode field, |bit24| the
// U/op modifier, and |q| the quad-form flag.
void Assembler::simd_vshl(FloatRegister Dd, FloatRegister Dm, unsigned imm, unsigned size,
                          unsigned q, unsigned bit24, unsigned encode) {
  starti;
  assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
  assert(imm < (1u << size), "Shift is too big");
  assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
  f(0b1111001, 31, 25), f(bit24, 24), f(1, 23), fp_rencode(Dd, false, 12, 22);
  // imm6 packs the element-size marker bit together with the shift amount.
  f(((1u << size) | imm) & 0b111111, 21, 16), f(size == 6 ? 1 : 0, 7), f(q, 6);
  f(encode, 11, 8), fp_rencode(Dm, false, 0, 5), f(1, 4);
}
// Emit a SIMD element-reverse (vrev family) of Dm into Dd; |op| selects
// the reverse region, |size| the element width, |q| the quad form.
void Assembler::simd_rev(FloatRegister Dd, FloatRegister Dm, unsigned q, unsigned size,
                         unsigned op) {
  starti;
  // Feature check added for consistency: every sibling simd_* emitter
  // asserts the Advanced SIMD coprocessor is present before encoding.
  assert(VM_Version::features() & FT_AdvSIMD, "SIMD coprocessor required");
  assert(!q || ((Dd->encoding() & 2) == 0 && (Dm->encoding() & 2) == 0), "Odd registers");
  f(0b111100111, 31, 23), fp_rencode(Dd, false, 12, 22), f(0b11, 21, 20);
  f(size, 19, 18), f(0b00, 17, 16), f(0b000, 11, 9), f(op, 8, 7);
  f(q, 6), fp_rencode(Dm, false, 0, 5), f(0, 4);
}
// Emit an ARMv8 CRC32 instruction: Rd = crc(Rn, Rm) with the operand
// width selected by |size|. Requires the CRC32 extension.
void Assembler::v8_crc32(Register Rd, Register Rn, Register Rm, unsigned size, Condition cond) {
  starti;
  assert(VM_Version::features() & FT_CRC32, "Instruction is not supported by CPU");
  f(cond, 31, 28), f(0b00010, 27, 23), f(size, 22, 21), f(0, 20), rf(Rn, 16), rf(Rd, 12);
  f(0b00000100, 11, 4), rf(Rm, 0);
}
#undef starti