GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/frame_aarch32.inline.hpp

/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2015, Linaro Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH32_VM_FRAME_AARCH32_INLINE_HPP
#define CPU_AARCH32_VM_FRAME_AARCH32_INLINE_HPP

#include "code/codeCache.hpp"

// Inline functions for AArch32 frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}

static int spin;
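
// Shared initialization used by the (sp, fp, pc) constructor below: record the
// frame pointers, look up the CodeBlob containing pc, and if the pc has been
// patched for deoptimization, restore the original pc and note the deopt state.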
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

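// Variant that also records a caller-supplied unextended sp, used when the
// frame's sp may have been extended (for example by an adapter).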
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

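// Constructor used when only sp and fp are known; the return pc is recovered
// from the saved slot at fp[0], so its validity cannot be asserted here.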
inline frame::frame(intptr_t* sp, intptr_t* fp) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(fp[0]);

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
  // unlucky the junk value could point to a zombied method and we'll die on the
  // find_blob call. This is also why we can have no asserts on the validity
  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However, this assert is of somewhat dubious
  // value.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

// Accessors

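// Frames compare equal when sp, unextended sp, fp and pc all match; the assert
// additionally checks that the cached CodeBlob and deoptimization state agree.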
inline bool frame::equal(frame other) const {
  bool ret = sp() == other.sp()
             && unextended_sp() == other.unextended_sp()
             && fp() == other.fp()
             && pc() == other.pc();
  assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Relationals on frames, based on the frame id.
// Return true if the frame is younger (more recent activation) than the frame represented by id
inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() < id ; }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                  return this->id() > id ; }

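// The frame pointer saved in this frame at link_offset (i.e. the caller's fp).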
inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }

// Return address:

inline address* frame::sender_pc_addr() const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc() const      { return *sender_pc_addr(); }

#ifdef CC_INTERP

inline interpreterState frame::get_interpreterState() const {
  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
}

inline intptr_t* frame::sender_sp() const {
  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
  if (is_interpreted_frame()) {
    assert(false, "should never happen");
    return get_interpreterState()->sender_sp();
  } else {
    return addr_at(sender_sp_offset);
  }
}

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_locals);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_bcp);
}


// Constant pool cache

inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_constants);
}

// Method

inline methodOop* frame::interpreter_frame_method_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_method);
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_mdx);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  assert(is_interpreted_frame(), "wrong frame type");
  return get_interpreterState()->_stack + 1;
}

#else /* asm interpreter */
inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  return (intptr_t*) addr_at(interpreter_frame_bcx_offset);
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  return (intptr_t*) addr_at(interpreter_frame_mdx_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

#endif /* CC_INTERP */

inline int frame::pd_oop_map_offset_adjustment() const {
  return 0;
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

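// The first expression-stack slot sits just below the monitor area; the stack
// grows toward lower addresses (see the direction accessor below).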
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - local_index + (local_index < nof_args ? 1: -1));
}

inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors);
}

inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1);
}

inline bool frame::volatile_across_calls(Register reg) {
  return true;
}


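// An oop result is returned in r0; these helpers read and update it through the
// register save location recorded in the RegisterMap.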
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(r0->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");

  return (*result_adr);
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(r0->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");

  *result_adr = obj;
}

#endif // CPU_AARCH32_VM_FRAME_AARCH32_INLINE_HPP