GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/asm/assembler.cpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation
// faster, the assembler keeps a copy of the code buffer's boundaries and modifies
// them when emitting bytes, rather than going through the code buffer's accessor
// functions all the time. The code buffer is updated via set_code_end(...) after
// emitting a whole instruction.
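//
// (Illustrative sketch of that fast path; the exact field and method names
//  vary by port, so treat this as an assumption-laden example only:
//    address end = code_section()->end();   // cached section boundary
//    *end++ = 0x90;                         // emit bytes without re-checking
//    code_section()->set_end(end);          // publish, cf. set_code_end(...)
//  )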

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  if (cs->start() == NULL) {
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s",
                                                     code->name()));
  }
  _code_section = cs;
  _oop_recorder = code->oop_recorder();
  DEBUG_ONLY( _short_branch_delta = 0; )
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  set_code_section(code()->insts());
}
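
// (Typical caller pattern, sketched for illustration only; "emit_my_stub"
//  and "stub_size" are hypothetical names, not part of this file:
//    address stub = __ start_a_stub(stub_size);
//    if (stub == NULL) return;    // CodeCache is full; caller must bail out
//    emit_my_stub(masm);          // emit into the stubs section
//    __ end_a_stub();             // switch back to the insts section
//  )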

// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
  address end = cs->end();
  int pad = -(intptr_t)end & (required_align-1);
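  // (The line above rounds 'end' up to the next multiple of required_align,
  //  which must be a power of two. Illustrative arithmetic: with
  //  required_align == 8 and an 'end' address ending in 0x6,
  //  -(intptr_t)end & 7 == 2, so two pad bytes are needed.)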
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// in section cs (insts or stubs).
void AbstractAssembler::end_a_const(CodeSection* cs) {
  assert(_code_section == code()->consts(), "not in consts?");
  set_code_section(cs);
}

void AbstractAssembler::flush() {
  ICache::invalidate_range(addr_at(0), offset());
}

void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack, where n
    // is configurable by StackShadowPages. The setting depends on the maximum
    // depth of the VM call stack or native code before going back into Java
    // code, since only Java code can raise a stack overflow exception using
    // the stack banging mechanism. The VM and native code do not detect
    // stack overflow.
    // The code in JavaCalls::call() checks that there is at least n pages
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the framesize
    // is greater than a page.
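    //
    // (Worked example, purely illustrative: with a 4K page size and
    //  StackShadowPages == 20, bang_end starts at 80K. A 10K frame raises
    //  bang_end to 90K, so the loop below bangs at offsets 80K, 84K and
    //  88K -- one probe per page that the new frame might span.)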

    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages * page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}

void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}
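
// (Typical Label lifecycle, sketched for illustration; the branch mnemonic
//  is port-specific and hypothetical here:
//    Label L;
//    __ br_eq(L);    // forward branch: location recorded via add_patch_at()
//    ...
//    __ bind(L);     // bind() fixes the target and patches all recorded uses
//  )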

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}

struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};

DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this assert is hit (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}

void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

RegisterOrConstant AbstractAssembler::delayed_value(int(*value_fn)(), Register tmp, int offset) {
  intptr_t val = (intptr_t) (*value_fn)();
  if (val != 0)  return val + offset;
  return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}

RegisterOrConstant AbstractAssembler::delayed_value(address(*value_fn)(), Register tmp, int offset) {
  intptr_t val = (intptr_t) (*value_fn)();
  if (val != 0)  return val + offset;
  return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}

intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}

intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}

void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}
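
// (Sketch of the intended flow, for illustration; "my_field_offset" is a
//  hypothetical value function whose answer is unknown at assembly time:
//    RegisterOrConstant v = __ delayed_value(&my_field_offset, tmp_reg);
//  If my_field_offset() already returns nonzero, v is an immediate constant.
//  Otherwise the emitted code loads from the DelayedConstant slot, and a
//  later call to update_delayed_values() fills that slot in.)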

void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

const char* AbstractAssembler::code_string(const char* str) {
  if (sect() == CodeBuffer::SECT_INSTS || sect() == CodeBuffer::SECT_STUBS) {
    return code_section()->outer()->code_string(str);
  }
  return NULL;
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // The first page after heap_base is unmapped, and for a narrow-oop
    // implicit null check the faulting address is heap_base + offset.
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
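  // A NULL-based access with 0 <= offset < page_size faults inside the
  // protected page at address zero, so the implicit-null-check machinery
  // can recover it; any other offset needs an explicit null check.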
  return offset < 0 || os::vm_page_size() <= offset;
}