GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP

#include "code/codeCache.hpp"

// Inline functions for Intel frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

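  // If the return pc has been patched to the nmethod's deopt handler,
  // recover the original pc and mark this frame as deoptimized.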
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
  // unlucky the junk value could point to a zombied method and we'll die on the
  // find_blob call. This is also why we can have no asserts on the validity
  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Relationals on frames, based on frame ids
// Return true if the frame is younger (more recent activation) than the frame represented by id
inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() < id ; }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }

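// link() returns the saved frame pointer stored in this frame, i.e. the caller's fp;
// set_link() stores a new value into that slot.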
inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline void frame::set_link(intptr_t* addr)       { *(intptr_t **)addr_at(link_offset) = addr; }


inline intptr_t* frame::unextended_sp() const     { return _unextended_sp; }

// Return address:

inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// Return the address of native parameter number idx (zero-origin index).
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); }

#ifdef CC_INTERP

inline interpreterState frame::get_interpreterState() const {
  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
}

inline intptr_t* frame::sender_sp() const {
  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
  if (is_interpreted_frame()) {
    assert(false, "should never happen");
    return get_interpreterState()->sender_sp();
  } else {
    return addr_at(sender_sp_offset);
  }
}

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_locals);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_bcp);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_constants);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_method);
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_mdx);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  assert(is_interpreted_frame(), "wrong frame type");
  return get_interpreterState()->_stack + 1;
}

#else /* asm interpreter */
inline intptr_t* frame::sender_sp() const { return addr_at( sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

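// last_sp is the stack pointer the interpreter records before making a call;
// NULL means no call is in progress and sp() itself is the top of the expression
// stack (see interpreter_frame_tos_address below).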
inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcx_offset);
}


inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdx_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

#endif /* CC_INTERP */

inline int frame::pd_oop_map_offset_adjustment() const {
  return 0;
}

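// Size, in words, of one monitor (BasicObjectLock) in an interpreter frame.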
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

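// The expression stack starts just below the monitor area and grows towards
// lower addresses, hence the -1 direction below.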
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}

// Compiled frames

inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - local_index + (local_index < nof_args ? 1: -1));
}

inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors);
}

inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1);
}

inline bool frame::volatile_across_calls(Register reg) {
  return true;
}

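// The method's oop result is kept in rax across certain runtime calls; these
// accessors read and update it through the save location recorded in the
// RegisterMap (e.g. so a GC can relocate the oop).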
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");

  return (*result_adr);
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");

  *result_adr = obj;
}

#endif // CPU_X86_VM_FRAME_X86_INLINE_HPP