Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/ppc/vm/frame_ppc.cpp
83402 views
1
/*
2
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2012, 2017 SAP AG. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
9
*
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
15
*
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
*
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
* or visit www.oracle.com if you need additional information or have any
22
* questions.
23
*
24
*/
25
26
#include "precompiled.hpp"
27
#include "interpreter/interpreter.hpp"
28
#include "memory/resourceArea.hpp"
29
#include "oops/markOop.hpp"
30
#include "oops/method.hpp"
31
#include "oops/oop.inline.hpp"
32
#include "runtime/frame.inline.hpp"
33
#include "runtime/handles.inline.hpp"
34
#include "runtime/javaCalls.hpp"
35
#include "runtime/monitorChunk.hpp"
36
#include "runtime/signature.hpp"
37
#include "runtime/stubCodeGenerator.hpp"
38
#include "runtime/stubRoutines.hpp"
39
#include "vmreg_ppc.inline.hpp"
40
#ifdef COMPILER1
41
#include "c1/c1_Runtime1.hpp"
42
#include "runtime/vframeArray.hpp"
43
#endif
44
45
#ifdef ASSERT
46
void RegisterMap::check_location_valid() {
47
}
48
#endif // ASSERT
49
50
// Conservatively decide whether this frame can be used to locate its sender.
// Used by code (e.g. async profiling / error reporting) that may observe a
// frame in an arbitrary, partially-constructed state. Returns false on any
// doubt; all pointer checks are range checks against the thread's stack so
// that nothing outside the usable stack is ever dereferenced.
bool frame::safe_for_sender(JavaThread *thread) {
  bool safe = false;  // NOTE(review): unused local, kept for parity with other platforms.
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;

  // Consider stack guards when trying to determine "safe" stack pointers.
  // NOTE(review): 'static' caches the guard size computed from the first
  // thread that reaches this code; assumes all threads use identical
  // red/yellow zone sizes — confirm against thread creation parameters.
  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
    thread->stack_red_zone_size() + thread->stack_yellow_zone_size() : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards).
  // Stacks grow downward: base is the high address, base - size the low end.
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);

  if (!sp_safe) {
    return false;
  }

  // Unextended sp must be within the stack and above or equal sp.
  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) && (unextended_sp >= sp);

  if (!unextended_sp_safe) {
    return false;
  }

  // An fp must be within the stack and above (but not equal) sp.
  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
  // An interpreter fp must additionally leave room for the interpreter
  // state and the top ABI area between sp and fp.
  bool fp_interp_safe = (fp <= thread->stack_base()) && (fp > sp) &&
    ((fp - sp) >= (ijava_state_size + top_ijava_frame_abi_size));

  // We know sp/unextended_sp are safe, only fp is questionable here.

  // If the current frame is known to the code cache then we can attempt to
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code.
  if (_cb != NULL ){
    // Entry frame checks.
    if (is_entry_frame()) {
      // An entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    }

    // Now check if the frame is complete and the test is
    // reliable. Unfortunately we can only check frame completeness for
    // runtime stubs and nmethods. Other generic buffer blobs are more
    // problematic so we just assume they are OK. Adapter blobs never have a
    // complete frame and are never OK.
    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob.
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    if (is_interpreted_frame() && !fp_interp_safe) {
      return false;
    }

    // On PPC the caller's frame starts at fp; the saved link register in
    // the caller's ABI area is the sender pc.
    abi_minframe* sender_abi = (abi_minframe*) fp;
    intptr_t* sender_sp = (intptr_t*) fp;
    address sender_pc = (address) sender_abi->lr;;

    // We must always be able to find a recognizable pc.
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method.
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // It should be safe to construct the sender though it might not be valid.
    frame sender(sender_sp, sender_pc);

    // Do we have a valid fp?
    address sender_fp = (address) sender.fp();

    // sender_fp must be within the stack and above (but not
    // equal) current frame's fp.
    if (sender_fp > thread->stack_base() || sender_fp <= fp) {
      return false;
    }

    // If the potential sender is the interpreter then we can do some more checking.
    if (Interpreter::contains(sender_pc)) {
      return sender.is_interpreted_frame_valid(thread);
    }

    // Could just be some random pointer within the codeBlob.
    if (!sender.cb()->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache.
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    if (sender.is_entry_frame()) {
      return sender.is_entry_frame_valid(thread);
    }

    // Frame size is always greater than zero. If the sender frame size is zero or less,
    // something is really weird and we better give up.
    if (sender_blob->frame_size() <= 0) {
      return false;
    }

    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe.
  if (!fp_safe) {
    return false;
  }

  return true;
}
181
182
bool frame::is_interpreted_frame() const {
183
return Interpreter::contains(pc());
184
}
185
186
// Compute the sender of an entry frame (a Java frame that was called from C).
// All intervening C frames are skipped; the top Java frame recorded in the
// call wrapper's JavaFrameAnchor is returned as the sender.
frame frame::sender_for_entry_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender.
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
  // Reset the map; clear() turns include_argument_oops back on.
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");

  if (jfa->last_Java_pc() != NULL) {
    // The anchor recorded an explicit pc; use it directly.
    frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
    return fr;
  }
  // Last_java_pc is not set, if we come here from compiled code. The
  // constructor retrieves the PC from the stack.
  frame fr(jfa->last_Java_sp());
  return fr;
}
205
206
// Compute the sender of an interpreter frame. The third constructor argument
// is the sender's unextended sp; which field supplies it depends on whether
// this is the C++ interpreter (CC_INTERP) or the template interpreter.
// NOTE(review): 'map' is unused here — the caller (frame::sender) has already
// set include_argument_oops appropriately.
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
  // Pass callers initial_caller_sp as unextended_sp.
  return frame(sender_sp(), sender_pc(),
               CC_INTERP_ONLY((intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp)
               NOT_CC_INTERP((intptr_t*)get_ijava_state()->sender_sp)
               );
}
213
214
// Compute the sender of a compiled frame and, when the map is being
// maintained, refresh it from this frame's oop map information.
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");

  // Frame owned by compiler: read the saved return pc and sender sp
  // through the code-blob-aware accessors.
  address sender_pc = *compiled_sender_pc_addr(_cb);
  frame sender_frame(compiled_sender_sp(_cb), sender_pc);

  // Adjust the register map if the caller wants it kept up to date.
  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }
  }

  return sender_frame;
}
234
235
// Sender sp of a compiled frame. On PPC this is simply the frame's
// sender_sp(); the CodeBlob argument is not needed on this platform.
intptr_t* frame::compiled_sender_sp(CodeBlob* cb) const {
  intptr_t* result = sender_sp();
  return result;
}
238
239
// Address of the saved sender pc of a compiled frame. On PPC this is just
// sender_pc_addr(); the CodeBlob argument is not needed on this platform.
address* frame::compiled_sender_pc_addr(CodeBlob* cb) const {
  address* result = sender_pc_addr();
  return result;
}
242
243
// Dispatch to the appropriate sender_for_xxx variant based on the kind of
// frame this is.
frame frame::sender(RegisterMap* map) const {
  // Start out assuming argument oops need not be followed; the
  // sender_for_xxx variants turn this back on where required.
  map->set_include_argument_oops(false);

  if (is_entry_frame()) {
    return sender_for_entry_frame(map);
  }
  if (is_interpreted_frame()) {
    return sender_for_interpreter_frame(map);
  }
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");

  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), sender_pc());
}
259
260
// Patch the return pc of this frame: store the new pc into the saved link
// register slot of this frame's ABI area and update _cb/_pc/_deopt_state
// to match. If the pc being replaced was a deoptimization point, the frame
// is marked deoptimized and _pc keeps the original (pre-deopt) value.
void frame::patch_pc(Thread* thread, address pc) {
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
                  p2i(&((address*) _sp)[-1]), p2i(((address*) _sp)[-1]), p2i(pc));
  }
  // The saved lr in our own ABI area is where the sender will pick up the pc.
  own_abi()->lr = (uint64_t)pc;
  _cb = CodeCache::find_blob(pc);
  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
    // The old pc was a deopt entry: the nmethod must have stashed the
    // original pc before we were asked to patch.
    address orig = (((nmethod*)_cb)->get_original_pc(this));
    assert(orig == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
    // Leave _pc as is.
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
277
278
void frame::pd_gc_epilog() {
279
if (is_interpreted_frame()) {
280
// Set constant pool cache entry for interpreter.
281
Method* m = interpreter_frame_method();
282
283
*interpreter_frame_cpoolcache_addr() = m->constants()->cache();
284
}
285
}
286
287
// Validate an interpreter frame. On PPC no platform-specific checks are
// performed beyond asserting the frame kind; the frame is accepted as-is.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // Nothing further to verify on this platform.
  return true;
}
292
293
// Copy the invocation result of this (interpreted) frame's method into
// *oop_result / *value_result and return the method's result type.
//
// For native methods the possible result was saved into the interpreter
// frame prior to the method-exit notification; for Java methods the result
// is read from the top of the expression stack.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to calling into the runtime to notify the method exit the possible
    // result value is saved into the interpreter frame.
#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
    address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
#else
    address lresult = (address)&(get_ijava_state()->lresult);
    address fresult = (address)&(get_ijava_state()->fresult);
#endif

    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        *oop_result = JNIHandles::resolve(*(jobject*)lresult);
        break;
      }
      // We use std/stfd to store the values.
      case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
      case T_INT     : value_result->i = (jint)     *(long*)lresult;          break;
      case T_CHAR    : value_result->c = (jchar)    *(unsigned long*)lresult; break;
      case T_SHORT   : value_result->s = (jshort)   *(long*)lresult;          break;
      // Fix: a byte result belongs in the 'b' member of the jvalue union,
      // not in 'z' (the boolean member).
      case T_BYTE    : value_result->b = (jbyte)    *(long*)lresult;          break;
      case T_LONG    : value_result->j = (jlong)    *(long*)lresult;          break;
      case T_FLOAT   : value_result->f = (jfloat)   *(double*)fresult;        break;
      case T_DOUBLE  : value_result->d = (jdouble)  *(double*)fresult;        break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    intptr_t* tos_addr = interpreter_frame_tos_address();
    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = *(oop*)tos_addr;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        // Fix: 'break' was missing — control fell through into T_BOOLEAN
        // and clobbered *value_result with a reinterpreted read of tos.
        break;
      }
      case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr; break;
      case T_BYTE    : value_result->b = (jbyte)    *(jint*)tos_addr; break;
      case T_CHAR    : value_result->c = (jchar)    *(jint*)tos_addr; break;
      case T_SHORT   : value_result->s = (jshort)   *(jint*)tos_addr; break;
      case T_INT     : value_result->i = *(jint*)tos_addr;            break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr;           break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr;          break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr;         break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  }
  return type;
}
351
352
#ifndef PRODUCT

// Debug/diagnostic helper: annotate the FrameValues collection with the
// addresses of the interpreter-state fields of this frame, so stack dumps
// can label each slot. Only interpreter frames carry such state.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
#ifdef CC_INTERP
    // C++ interpreter: describe every field of the interpreterState struct.
    interpreterState istate = get_interpreterState();
    values.describe(frame_no, (intptr_t*)istate, "istate");
    values.describe(frame_no, (intptr_t*)&(istate->_thread), " thread");
    values.describe(frame_no, (intptr_t*)&(istate->_bcp), " bcp");
    values.describe(frame_no, (intptr_t*)&(istate->_locals), " locals");
    values.describe(frame_no, (intptr_t*)&(istate->_constants), " constants");
    values.describe(frame_no, (intptr_t*)&(istate->_method), err_msg(" method = %s", istate->_method->name_and_sig_as_C_string()));
    values.describe(frame_no, (intptr_t*)&(istate->_mdx), " mdx");
    values.describe(frame_no, (intptr_t*)&(istate->_stack), " stack");
    values.describe(frame_no, (intptr_t*)&(istate->_msg), err_msg(" msg = %s", BytecodeInterpreter::C_msg(istate->_msg)));
    values.describe(frame_no, (intptr_t*)&(istate->_result), " result");
    values.describe(frame_no, (intptr_t*)&(istate->_prev_link), " prev_link");
    values.describe(frame_no, (intptr_t*)&(istate->_oop_temp), " oop_temp");
    values.describe(frame_no, (intptr_t*)&(istate->_stack_base), " stack_base");
    values.describe(frame_no, (intptr_t*)&(istate->_stack_limit), " stack_limit");
    values.describe(frame_no, (intptr_t*)&(istate->_monitor_base), " monitor_base");
    values.describe(frame_no, (intptr_t*)&(istate->_frame_bottom), " frame_bottom");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_pc), " last_Java_pc");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_fp), " last_Java_fp");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_sp), " last_Java_sp");
    values.describe(frame_no, (intptr_t*)&(istate->_self_link), " self_link");
    values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
    values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
    // Template interpreter: describe each field of the ijava_state area.
#define DESCRIBE_ADDRESS(name) \
  values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);

    DESCRIBE_ADDRESS(method);
    DESCRIBE_ADDRESS(locals);
    DESCRIBE_ADDRESS(monitors);
    DESCRIBE_ADDRESS(cpoolCache);
    DESCRIBE_ADDRESS(bcp);
    DESCRIBE_ADDRESS(esp);
    DESCRIBE_ADDRESS(mdx);
    DESCRIBE_ADDRESS(top_frame_sp);
    DESCRIBE_ADDRESS(sender_sp);
    DESCRIBE_ADDRESS(oop_tmp);
    DESCRIBE_ADDRESS(lresult);
    DESCRIBE_ADDRESS(fresult);
#endif
  }
}
#endif
400
401
// If this frame is returning to a compiled MethodHandle call site, the
// saved_fp actually holds the saved unextended SP, so _unextended_sp must
// be recomputed. NOTE(review): the detection predicate is not implemented
// for the PPC port yet (the condition is '&& false'), so this function is
// currently a no-op at runtime.
void frame::adjust_unextended_sp() {
  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP. The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:

  if (is_compiled_frame() && false /*is_at_mh_callsite()*/) { // TODO PPC port
    // If the sender PC is a deoptimization point, get the original
    // PC. For MethodHandle call site the unextended_sp is stored in
    // saved_fp.
    _unextended_sp = _fp - _cb->frame_size();

#ifdef ASSERT
    // Sanity-check that the adjusted sp still yields a walkable stack.
    nmethod *sender_nm = _cb->as_nmethod_or_null();
    assert(sender_nm && *_sp == *_unextended_sp, "backlink changed");

    intptr_t* sp = _unextended_sp;  // check if stack can be walked from here
    for (int x = 0; x < 5; ++x) {   // check up to a couple of backlinks
      intptr_t* prev_sp = *(intptr_t**)sp;
      if (prev_sp == 0) break;      // end of stack
      assert(prev_sp>sp, "broken stack");
      sp = prev_sp;
    }

    if (sender_nm->is_deopt_mh_entry(_pc)) { // checks for deoptimization
      address original_pc = sender_nm->get_original_pc(this);
      assert(sender_nm->insts_contains(original_pc), "original PC must be in nmethod");
      assert(sender_nm->is_method_handle_return(original_pc), "must be");
    }
#endif
  }
}
433
434
// Not actually used on PPC; returning fp() keeps the shared deoptimization
// code paths introduced by 7087445 unchanged.
intptr_t *frame::initial_deoptimization_info() {
  intptr_t* info = fp();
  return info;
}
438
439
#ifndef PRODUCT
// This is a generic constructor which is only used by pns() in debug.cpp.
// Note: the 'fp' argument is not used directly; find_codeblob_... derives
// and sets _fp itself.
frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
  find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
}
#endif
445
446