Book a Demo!
CoCalc Logo Icon
Store | Features | Docs | Share | Support | News | About | Policies | Sign Up | Sign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/asm/assembler.hpp
32285 views
1
/*
2
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#ifndef SHARE_VM_ASM_ASSEMBLER_HPP
26
#define SHARE_VM_ASM_ASSEMBLER_HPP
27
28
#include "asm/codeBuffer.hpp"
29
#include "code/oopRecorder.hpp"
30
#include "code/relocInfo.hpp"
31
#include "memory/allocation.hpp"
32
#include "utilities/debug.hpp"
33
#include "utilities/growableArray.hpp"
34
#include "utilities/top.hpp"
35
36
#ifdef TARGET_ARCH_x86
37
# include "register_x86.hpp"
38
# include "vm_version_x86.hpp"
39
#endif
40
#ifdef TARGET_ARCH_sparc
41
# include "register_sparc.hpp"
42
# include "vm_version_sparc.hpp"
43
#endif
44
#ifdef TARGET_ARCH_zero
45
# include "register_zero.hpp"
46
# include "vm_version_zero.hpp"
47
#endif
48
#ifdef TARGET_ARCH_arm
49
# include "register_arm.hpp"
50
# include "vm_version_arm.hpp"
51
#endif
52
#ifdef TARGET_ARCH_ppc
53
# include "register_ppc.hpp"
54
# include "vm_version_ppc.hpp"
55
#endif
56
#ifdef TARGET_ARCH_aarch32
57
# include "register_aarch32.hpp"
58
# include "vm_version_aarch32.hpp"
59
#endif
60
#ifdef TARGET_ARCH_aarch64
61
# include "register_aarch64.hpp"
62
# include "vm_version_aarch64.hpp"
63
#endif
64
65
// This file contains platform-independent assembler declarations.
66
67
class MacroAssembler;
68
class AbstractAssembler;
69
class Label;
70
71
/**
72
* Labels represent destinations for control transfer instructions. Such
73
* instructions can accept a Label as their target argument. A Label is
74
* bound to the current location in the code stream by calling the
75
* MacroAssembler's 'bind' method, which in turn calls the Label's 'bind'
76
* method. A Label may be referenced by an instruction before it's bound
77
* (i.e., 'forward referenced'). 'bind' stores the current code offset
78
* in the Label object.
79
*
80
* If an instruction references a bound Label, the offset field(s) within
81
* the instruction are immediately filled in based on the Label's code
82
* offset. If an instruction references an unbound label, that
83
* instruction is put on a list of instructions that must be patched
84
* (i.e., 'resolved') when the Label is bound.
85
*
86
* 'bind' will call the platform-specific 'patch_instruction' method to
87
* fill in the offset field(s) for each unresolved instruction (if there
88
* are any). 'patch_instruction' lives in one of the
89
* cpu/<arch>/vm/assembler_<arch>* files.
90
*
91
* Instead of using a linked list of unresolved instructions, a Label has
92
* an array of unresolved instruction code offsets. _patch_index
93
* contains the total number of forward references. If the Label's array
94
* overflows (i.e., _patch_index grows larger than the array size), a
95
* GrowableArray is allocated to hold the remaining offsets. (The cache
96
* size is 4 for now, which handles over 99.5% of the cases)
97
*
98
* Labels may only be used within a single CodeSection. If you need
99
* to create references between code sections, use explicit relocations.
100
*/
101
class Label VALUE_OBJ_CLASS_SPEC {
102
private:
103
enum { PatchCacheSize = 4 };
104
105
// _loc encodes both the binding state (via its sign)
106
// and the binding locator (via its value) of a label.
107
//
108
// _loc >= 0 bound label, loc() encodes the target (jump) position
109
// _loc == -1 unbound label
110
int _loc;
111
112
// References to instructions that jump to this unresolved label.
113
// These instructions need to be patched when the label is bound
114
// using the platform-specific patchInstruction() method.
115
//
116
// To avoid having to allocate from the C-heap each time, we provide
117
// a local cache and use the overflow only if we exceed the local cache
118
int _patches[PatchCacheSize];
119
int _patch_index;
120
GrowableArray<int>* _patch_overflow;
121
122
Label(const Label&) { ShouldNotReachHere(); }
123
124
public:
125
126
/**
127
* After binding, be sure 'patch_instructions' is called later to link
128
*/
129
void bind_loc(int loc) {
130
assert(loc >= 0, "illegal locator");
131
assert(_loc == -1, "already bound");
132
_loc = loc;
133
}
134
void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }
135
136
#ifndef PRODUCT
137
// Iterates over all unresolved instructions for printing
138
void print_instructions(MacroAssembler* masm) const;
139
#endif // PRODUCT
140
141
/**
142
* Returns the position of the the Label in the code buffer
143
* The position is a 'locator', which encodes both offset and section.
144
*/
145
int loc() const {
146
assert(_loc >= 0, "unbound label");
147
return _loc;
148
}
149
int loc_pos() const { return CodeBuffer::locator_pos(loc()); }
150
int loc_sect() const { return CodeBuffer::locator_sect(loc()); }
151
152
bool is_bound() const { return _loc >= 0; }
153
bool is_unbound() const { return _loc == -1 && _patch_index > 0; }
154
bool is_unused() const { return _loc == -1 && _patch_index == 0; }
155
156
/**
157
* Adds a reference to an unresolved displacement instruction to
158
* this unbound label
159
*
160
* @param cb the code buffer being patched
161
* @param branch_loc the locator of the branch instruction in the code buffer
162
*/
163
void add_patch_at(CodeBuffer* cb, int branch_loc);
164
165
/**
166
* Iterate over the list of patches, resolving the instructions
167
* Call patch_instruction on each 'branch_loc' value
168
*/
169
void patch_instructions(MacroAssembler* masm);
170
171
void init() {
172
_loc = -1;
173
_patch_index = 0;
174
_patch_overflow = NULL;
175
}
176
177
Label() {
178
init();
179
}
180
181
~Label() {
182
assert(is_bound() || is_unused(), "Label was never bound to a location, but it was used as a jmp target");
183
}
184
185
void reset() {
186
init(); //leave _patch_overflow because it points to CodeBuffer.
187
}
188
};
189
190
// A union type for code which has to assemble both constant and
191
// non-constant operands, when the distinction cannot be made
192
// statically.
193
class RegisterOrConstant VALUE_OBJ_CLASS_SPEC {
194
private:
195
Register _r;
196
intptr_t _c;
197
198
public:
199
RegisterOrConstant(): _r(noreg), _c(0) {}
200
RegisterOrConstant(Register r): _r(r), _c(0) {}
201
RegisterOrConstant(intptr_t c): _r(noreg), _c(c) {}
202
203
Register as_register() const { assert(is_register(),""); return _r; }
204
intptr_t as_constant() const { assert(is_constant(),""); return _c; }
205
206
Register register_or_noreg() const { return _r; }
207
intptr_t constant_or_zero() const { return _c; }
208
209
bool is_register() const { return _r != noreg; }
210
bool is_constant() const { return _r == noreg; }
211
};
212
213
// The Abstract Assembler: Pure assembler doing NO optimizations on the
214
// instruction level; i.e., what you write is what you get.
215
// The Assembler is generating code into a CodeBuffer.
216
class AbstractAssembler : public ResourceObj {
  friend class Label;

 protected:
  CodeSection* _code_section;          // section within the code buffer
  OopRecorder* _oop_recorder;          // support for relocInfo::oop_type

 public:
  // Code emission & accessing

  // Address of the byte at offset 'pos' within the current code section.
  address addr_at(int pos) const { return code_section()->start() + pos; }

 protected:
  // This routine is called when a label is used for an address.
  // Labels and displacements truck in offsets, but target must return a PC.
  address target(Label& L) { return code_section()->target(L, pc()); }

  // Immediate-range checks used by instruction encoders.
  bool is8bit(int x) const { return -0x80 <= x && x < 0x80; }
  bool isByte(int x) const { return 0 <= x && x < 0x100; }
  bool isShiftCount(int x) const { return 0 <= x && x < 32; }

  // Instruction boundaries (required when emitting relocatable values).
  // RAII helper: sets the section mark on construction and clears it on
  // destruction, so relocations emitted inside the scope can refer to the
  // start of the instruction.  Nesting is forbidden (see the assert).
  class InstructionMark: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    InstructionMark(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->inst_mark() == NULL, "overlapping instructions");
      _assm->set_inst_mark();
    }
    ~InstructionMark() {
      _assm->clear_inst_mark();
    }
  };
  friend class InstructionMark;
#ifdef ASSERT
  // Make it return true on platforms which need to verify
  // instruction boundaries for some operations.
  static bool pd_check_instruction_mark();

  // Add delta to short branch distance to verify that it still fits into imm8.
  int _short_branch_delta;

  int short_branch_delta() const { return _short_branch_delta; }
  void set_short_branch_delta() { _short_branch_delta = 32; }
  void clear_short_branch_delta() { _short_branch_delta = 0; }

  // RAII helper that flags a short-branch emission in progress; the delta
  // is consumed by assertion checks elsewhere.  Nesting is forbidden.
  class ShortBranchVerifier: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->short_branch_delta() == 0, "overlapping instructions");
      _assm->set_short_branch_delta();
    }
    ~ShortBranchVerifier() {
      _assm->clear_short_branch_delta();
    }
  };
#else
  // Dummy in product.
  class ShortBranchVerifier: public StackObj {
   public:
    ShortBranchVerifier(AbstractAssembler* assm) {}
  };
#endif

 public:

  // Creation
  AbstractAssembler(CodeBuffer* code);

  // ensure buf contains all code (call this before using/copying the code)
  void flush();

  // Raw emission of integral/floating values into the current section.
  void emit_int8(   int8_t  x) { code_section()->emit_int8(   x); }
  void emit_int16(  int16_t x) { code_section()->emit_int16(  x); }
  void emit_int32(  int32_t x) { code_section()->emit_int32(  x); }
  void emit_int64(  int64_t x) { code_section()->emit_int64(  x); }

  void emit_float(  jfloat  x) { code_section()->emit_float(  x); }
  void emit_double( jdouble x) { code_section()->emit_double( x); }
  void emit_address(address x) { code_section()->emit_address(x); }

  // min and max values for signed immediate ranges
  static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1)) ; }
  static int max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }

  // Define some:
  static int min_simm10() { return min_simm(10); }
  static int min_simm13() { return min_simm(13); }
  static int min_simm16() { return min_simm(16); }

  // Test if x is within signed immediate range for nbits
  static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }

  // Define some:
  static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
  static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
  static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
  static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
  static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
  static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
  static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
  static bool is_simm32(intptr_t x) { return is_simm(x, 32); }

  // Accessors
  CodeSection* code_section() const { return _code_section; }
  CodeBuffer*  code()         const { return code_section()->outer(); }
  int          sect()         const { return code_section()->index(); }
  address      pc()           const { return code_section()->end(); }
  int          offset()       const { return code_section()->size(); }
  int          locator()      const { return CodeBuffer::locator(offset(), sect()); }

  OopRecorder* oop_recorder() const { return _oop_recorder; }
  void set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }

  // Instruction mark management (see InstructionMark above).
  address inst_mark() const { return code_section()->mark(); }
  void set_inst_mark()      { code_section()->set_mark(); }
  void clear_inst_mark()    { code_section()->clear_mark(); }

  // Constants in code

  // Attach relocation info 'rspec' to the current emission point.
  // Must be called between instructions (or at the instruction mark).
  void relocate(RelocationHolder const& rspec, int format = 0) {
    assert(!pd_check_instruction_mark()
        || inst_mark() == NULL || inst_mark() == code_section()->end(),
           "call relocate() between instructions");
    code_section()->relocate(code_section()->end(), rspec, format);
  }
  void relocate(relocInfo::relocType rtype, int format = 0) {
    code_section()->relocate(code_section()->end(), rtype, format);
  }

  static int code_fill_byte();  // used to pad out odd-sized code buffers

  // Associate a comment with the current offset. It will be printed
  // along with the disassembly when printing nmethods. Currently
  // only supported in the instruction section of the code buffer.
  void block_comment(const char* comment);
  // Copy str to a buffer that has the same lifetime as the CodeBuffer
  const char* code_string(const char* str);

  // Label functions
  void bind(Label& L);  // binds an unbound label L to the current code position

  // Move to a different section in the same code buffer.
  void set_code_section(CodeSection* cs);

  // Inform assembler when generating stub code and relocation info
  address start_a_stub(int required_space);
  void    end_a_stub();
  // Ditto for constants.
  address start_a_const(int required_space, int required_align = sizeof(double));
  void    end_a_const(CodeSection* cs);  // Pass the codesection to continue in (insts or stubs?).

  // constants support
  //
  // We must remember the code section (insts or stubs) in c1
  // so we can reset to the proper section in end_a_const().
  // Each *_constant helper emits the value into the constants section
  // and returns its address, or NULL if the section could not be started.
  address long_constant(jlong c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_int64(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address double_constant(jdouble c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_double(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address float_constant(jfloat c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_float(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }
  // Variant that also records relocation info for the emitted address.
  address address_constant(address c, RelocationHolder const& rspec) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      relocate(rspec);
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }

  // Bootstrapping aid to cope with delayed determination of constants.
  // Returns a static address which will eventually contain the constant.
  // The value zero (NULL) stands instead of a constant which is still uncomputed.
  // Thus, the eventual value of the constant must not be zero.
  // This is fine, since this is designed for embedding object field
  // offsets in code which must be generated before the object class is loaded.
  // Field offsets are never zero, since an object's header (mark word)
  // is located at offset zero.
  RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0);
  RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0);
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0;
  // Last overloading is platform-dependent; look in assembler_<arch>.cpp.
  static intptr_t* delayed_value_addr(int(*constant_fn)());
  static intptr_t* delayed_value_addr(address(*constant_fn)());
  static void update_delayed_values();

  // Bang stack to trigger StackOverflowError at a safe location
  // implementation delegates to machine-specific bang_stack_with_offset
  void generate_stack_overflow_check( int frame_size_in_bytes );
  virtual void bang_stack_with_offset(int offset) = 0;

  /**
   * A platform-dependent method to patch a jump instruction that refers
   * to this label.
   *
   * @param branch the location of the instruction to patch
   * @param masm the assembler which generated the branch
   */
  void pd_patch_instruction(address branch, address target);

};
455
456
#ifdef TARGET_ARCH_x86
457
# include "assembler_x86.hpp"
458
#endif
459
#ifdef TARGET_ARCH_aarch32
460
# include "assembler_aarch32.hpp"
461
#endif
462
#ifdef TARGET_ARCH_aarch64
463
# include "assembler_aarch64.hpp"
464
#endif
465
#ifdef TARGET_ARCH_sparc
466
# include "assembler_sparc.hpp"
467
#endif
468
#ifdef TARGET_ARCH_zero
469
# include "assembler_zero.hpp"
470
#endif
471
#ifdef TARGET_ARCH_arm
472
# include "assembler_arm.hpp"
473
#endif
474
#ifdef TARGET_ARCH_ppc
475
# include "assembler_ppc.hpp"
476
#endif
477
478
479
#endif // SHARE_VM_ASM_ASSEMBLER_HPP
480
481