Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/ppc/vm/frame_ppc.cpp
32285 views
1
/*
2
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2012, 2017 SAP AG. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
9
*
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
15
*
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
*
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
* or visit www.oracle.com if you need additional information or have any
22
* questions.
23
*
24
*/
25
26
#include "precompiled.hpp"
27
#include "interpreter/interpreter.hpp"
28
#include "memory/resourceArea.hpp"
29
#include "oops/markOop.hpp"
30
#include "oops/method.hpp"
31
#include "oops/oop.inline.hpp"
32
#include "runtime/frame.inline.hpp"
33
#include "runtime/handles.inline.hpp"
34
#include "runtime/javaCalls.hpp"
35
#include "runtime/monitorChunk.hpp"
36
#include "runtime/signature.hpp"
37
#include "runtime/stubCodeGenerator.hpp"
38
#include "runtime/stubRoutines.hpp"
39
#include "vmreg_ppc.inline.hpp"
40
#ifdef COMPILER1
41
#include "c1/c1_Runtime1.hpp"
42
#include "runtime/vframeArray.hpp"
43
#endif
44
45
#ifdef ASSERT
46
void RegisterMap::check_location_valid() {
47
}
48
#endif // ASSERT
49
50
// Conservatively decide whether it is safe to walk from this frame to its
// sender, e.g. from a profiler signal handler where the frame may be in an
// arbitrary state of construction. Returns false on any suspicious pointer.
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;

  // Consider stack guards when trying to determine "safe" stack pointers.
  // NOTE: cached on first call; the guard zone sizes come from global
  // settings, so the value is the same for every thread.
  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
    thread->stack_red_zone_size() + thread->stack_yellow_zone_size() : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards).
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);

  if (!sp_safe) {
    return false;
  }

  // Unextended sp must be within the stack.
  bool unextended_sp_safe = (unextended_sp < thread->stack_base());

  if (!unextended_sp_safe) {
    return false;
  }

  // An fp must be within the stack and above (but not equal) sp.
  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
  // An interpreter fp must be within the stack and above (but not equal) sp.
  // Moreover, it must be at least the size of the ijava_state structure.
  bool fp_interp_safe = (fp <= thread->stack_base()) && (fp > sp) &&
    ((fp - sp) >= ijava_state_size);

  // We know sp/unextended_sp are safe, only fp is questionable here.

  // If the current frame is known to the code cache then we can attempt to
  // construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code.

  if (_cb != NULL ){
    // Entry frame checks.
    if (is_entry_frame()) {
      // An entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    }

    // Now check if the frame is complete and the test is
    // reliable. Unfortunately we can only check frame completeness for
    // runtime stubs and nmethods. Other generic buffer blobs are more
    // problematic so we just assume they are OK. Adapter blobs never have a
    // complete frame and are never OK.
    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob.
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    if (is_interpreted_frame() && !fp_interp_safe) {
      return false;
    }

    // The back chain (fp) points at the sender's ABI area; the sender's pc
    // is read from the lr slot saved there.
    abi_minframe* sender_abi = (abi_minframe*) fp;
    intptr_t* sender_sp = (intptr_t*) fp;
    address sender_pc = (address) sender_abi->lr; // fixed: stray ';;' removed

    // We must always be able to find a recognizable pc.
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method.
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // It should be safe to construct the sender though it might not be valid.

    frame sender(sender_sp, sender_pc);

    // Do we have a valid fp?
    address sender_fp = (address) sender.fp();

    // sender_fp must be within the stack and above (but not
    // equal) current frame's fp.
    if (sender_fp > thread->stack_base() || sender_fp <= fp) {
      return false;
    }

    // If the potential sender is the interpreter then we can do some more checking.
    if (Interpreter::contains(sender_pc)) {
      return sender.is_interpreted_frame_valid(thread);
    }

    // Could just be some random pointer within the codeBlob.
    if (!sender.cb()->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache.
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    if (sender.is_entry_frame()) {
      return sender.is_entry_frame_valid(thread);
    }

    // Frame size is always greater than zero. If the sender frame size is zero or less,
    // something is really weird and we better give up.
    if (sender_blob->frame_size() <= 0) {
      return false;
    }

    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe.

  if (!fp_safe) {
    return false;
  }

  return true;
}
182
183
bool frame::is_interpreted_frame() const {
184
return Interpreter::contains(pc());
185
}
186
187
// Compute the sender of an entry frame (a frame pushed when C code called
// into Java). All intervening C frames are skipped; the sender is the last
// Java frame recorded in the thread's JavaFrameAnchor.
frame frame::sender_for_entry_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender.
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
  // Reset the map; clear() re-enables include_argument_oops (asserted below).
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");

  if (jfa->last_Java_pc() != NULL) {
    frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
    return fr;
  }
  // Last_java_pc is not set, if we come here from compiled code. The
  // constructor retrieves the PC from the stack.
  frame fr(jfa->last_Java_sp());
  return fr;
}
206
207
// Compute the sender of an interpreted frame. The caller's initial sp is
// passed as the unextended_sp of the resulting frame; where it is read from
// depends on whether this is the C++ interpreter (CC_INTERP) or the
// template interpreter.
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
  // Pass callers initial_caller_sp as unextended_sp.
  return frame(sender_sp(), sender_pc(),
               CC_INTERP_ONLY((intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp)
               NOT_CC_INTERP((intptr_t*)get_ijava_state()->sender_sp)
               );
}
214
215
// Compute the sender of a compiled frame and, when the map requests it,
// update the register map with this frame's oop map so GC can locate oops
// in the sender.
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");

  // Frame owned by compiler.
  address pc = *compiled_sender_pc_addr(_cb);
  frame caller(compiled_sender_sp(_cb), pc);

  // Now adjust the map.

  // Get the rest.
  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }
  }

  return caller;
}
235
236
// Sender sp of a compiled frame. The code blob argument is unused on PPC;
// the generic sender_sp() suffices.
intptr_t* frame::compiled_sender_sp(CodeBlob* cb) const {
  return sender_sp();
}
239
240
// Address of the slot holding the sender pc of a compiled frame. The code
// blob argument is unused on PPC; the generic sender_pc_addr() suffices.
address* frame::compiled_sender_pc_addr(CodeBlob* cb) const {
  return sender_pc_addr();
}
243
244
// Dispatch to the appropriate sender_for_* routine depending on the kind of
// this frame: entry, interpreted, compiled, or native stub.
frame frame::sender(RegisterMap* map) const {
  // Default is we do have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (is_entry_frame()) return sender_for_entry_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");

  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }
  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), sender_pc());
}
260
261
// Patch the saved return pc of this frame to 'pc' (the store goes to the lr
// slot of the frame's own ABI area), e.g. to redirect the return into the
// deoptimization blob. Also refreshes the cached code blob; if 'pc' is a
// deopt pc of an nmethod, the frame is marked deoptimized and _pc keeps the
// original value, otherwise _pc is updated to 'pc'.
void frame::patch_pc(Thread* thread, address pc) {
  if (TracePcPatching) {
    // NOTE(review): the trace prints _sp[-1] while the store below goes to
    // own_abi()->lr — confirm both refer to the same slot on this ABI.
    tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
                  p2i(&((address*) _sp)[-1]), p2i(((address*) _sp)[-1]), p2i(pc));
  }
  own_abi()->lr = (uint64_t)pc;
  _cb = CodeCache::find_blob(pc);
  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
    // Sanity: the nmethod must have stashed the original pc before patching.
    address orig = (((nmethod*)_cb)->get_original_pc(this));
    assert(orig == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
    // Leave _pc as is.
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
278
279
void frame::pd_gc_epilog() {
280
if (is_interpreted_frame()) {
281
// Set constant pool cache entry for interpreter.
282
Method* m = interpreter_frame_method();
283
284
*interpreter_frame_cpoolcache_addr() = m->constants()->cache();
285
}
286
}
287
288
// Platform hook for extra validation of an interpreted frame. No
// PPC-specific checks are performed here (see safe_for_sender for the
// checks that are done), so this always returns true.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  // Is there anything to do?
  assert(is_interpreted_frame(), "Not an interpreted frame");
  return true;
}
293
294
// Extract the invocation result from this interpreted frame. For native
// methods the (possible) result was saved into the interpreter frame before
// the method-exit notification; otherwise it is read from the top of the
// interpreter's expression stack. Object/array results are returned via
// 'oop_result', primitive results via 'value_result'. Returns the method's
// declared result type.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to calling into the runtime to notify the method exit the possible
    // result value is saved into the interpreter frame.
#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
    address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
#else
    address lresult = (address)&(get_ijava_state()->lresult);
    address fresult = (address)&(get_ijava_state()->fresult);
#endif

    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        *oop_result = JNIHandles::resolve(*(jobject*)lresult);
        break;
      }
      // We use std/stfd to store the values.
      case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
      case T_INT     : value_result->i = (jint)     *(long*)lresult;          break;
      case T_CHAR    : value_result->c = (jchar)    *(unsigned long*)lresult; break;
      case T_SHORT   : value_result->s = (jshort)   *(long*)lresult;          break;
      // Fixed: byte results belong in the 'b' member of jvalue, not 'z'.
      case T_BYTE    : value_result->b = (jbyte)    *(long*)lresult;          break;
      case T_LONG    : value_result->j = (jlong)    *(long*)lresult;          break;
      case T_FLOAT   : value_result->f = (jfloat)   *(double*)fresult;        break;
      case T_DOUBLE  : value_result->d = (jdouble)  *(double*)fresult;        break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    intptr_t* tos_addr = interpreter_frame_tos_address();
    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = *(oop*)tos_addr;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break; // Fixed: was falling through into T_BOOLEAN, clobbering value_result.
      }
      case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr;  break;
      case T_BYTE    : value_result->b = (jbyte)    *(jint*)tos_addr;  break;
      case T_CHAR    : value_result->c = (jchar)    *(jint*)tos_addr;  break;
      case T_SHORT   : value_result->s = (jshort)   *(jint*)tos_addr;  break;
      case T_INT     : value_result->i = *(jint*)tos_addr;             break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr;            break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr;           break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr;          break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  }
  return type;
}
352
353
#ifndef PRODUCT

// Debug-only: describe the platform-dependent parts of an interpreted frame
// for the frame-values printer. For the C++ interpreter every interpreterState
// field is labeled individually; for the template interpreter each ijava_state
// slot is labeled via the DESCRIBE_ADDRESS macro.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    values.describe(frame_no, (intptr_t*)istate, "istate");
    values.describe(frame_no, (intptr_t*)&(istate->_thread), " thread");
    values.describe(frame_no, (intptr_t*)&(istate->_bcp), " bcp");
    values.describe(frame_no, (intptr_t*)&(istate->_locals), " locals");
    values.describe(frame_no, (intptr_t*)&(istate->_constants), " constants");
    values.describe(frame_no, (intptr_t*)&(istate->_method), err_msg(" method = %s", istate->_method->name_and_sig_as_C_string()));
    values.describe(frame_no, (intptr_t*)&(istate->_mdx), " mdx");
    values.describe(frame_no, (intptr_t*)&(istate->_stack), " stack");
    values.describe(frame_no, (intptr_t*)&(istate->_msg), err_msg(" msg = %s", BytecodeInterpreter::C_msg(istate->_msg)));
    values.describe(frame_no, (intptr_t*)&(istate->_result), " result");
    values.describe(frame_no, (intptr_t*)&(istate->_prev_link), " prev_link");
    values.describe(frame_no, (intptr_t*)&(istate->_oop_temp), " oop_temp");
    values.describe(frame_no, (intptr_t*)&(istate->_stack_base), " stack_base");
    values.describe(frame_no, (intptr_t*)&(istate->_stack_limit), " stack_limit");
    values.describe(frame_no, (intptr_t*)&(istate->_monitor_base), " monitor_base");
    values.describe(frame_no, (intptr_t*)&(istate->_frame_bottom), " frame_bottom");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_pc), " last_Java_pc");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_fp), " last_Java_fp");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_sp), " last_Java_sp");
    values.describe(frame_no, (intptr_t*)&(istate->_self_link), " self_link");
    values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
    values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
// Label one ijava_state slot with its own field name.
#define DESCRIBE_ADDRESS(name) \
  values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);

      DESCRIBE_ADDRESS(method);
      DESCRIBE_ADDRESS(locals);
      DESCRIBE_ADDRESS(monitors);
      DESCRIBE_ADDRESS(cpoolCache);
      DESCRIBE_ADDRESS(bcp);
      DESCRIBE_ADDRESS(esp);
      DESCRIBE_ADDRESS(mdx);
      DESCRIBE_ADDRESS(top_frame_sp);
      DESCRIBE_ADDRESS(sender_sp);
      DESCRIBE_ADDRESS(oop_tmp);
      DESCRIBE_ADDRESS(lresult);
      DESCRIBE_ADDRESS(fresult);
#endif
  }
}

#endif
401
402
// Adjust _unextended_sp when returning to a compiled MethodHandle call site.
// NOTE: the condition below contains '&& false' (TODO PPC port), so the
// adjustment — including its ASSERT-only stack-walk sanity check — is
// currently disabled and this function is effectively a no-op.
void frame::adjust_unextended_sp() {
  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP. The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:

  if (is_compiled_frame() && false /*is_at_mh_callsite()*/) {  // TODO PPC port
    // If the sender PC is a deoptimization point, get the original
    // PC. For MethodHandle call site the unextended_sp is stored in
    // saved_fp.
    _unextended_sp = _fp - _cb->frame_size();

#ifdef ASSERT
    nmethod *sender_nm = _cb->as_nmethod_or_null();
    assert(sender_nm && *_sp == *_unextended_sp, "backlink changed");

    intptr_t* sp = _unextended_sp;  // check if stack can be walked from here
    for (int x = 0; x < 5; ++x) {  // check up to a couple of backlinks
      intptr_t* prev_sp = *(intptr_t**)sp;
      if (prev_sp == 0) break;  // end of stack
      // Back chain must strictly grow toward the stack base.
      assert(prev_sp>sp, "broken stack");
      sp = prev_sp;
    }

    if (sender_nm->is_deopt_mh_entry(_pc)) {  // checks for deoptimization
      address original_pc = sender_nm->get_original_pc(this);
      assert(sender_nm->insts_contains(original_pc), "original PC must be in nmethod");
      assert(sender_nm->is_method_handle_return(original_pc), "must be");
    }
#endif
  }
}
434
435
// Deoptimization bookkeeping hook; not used on PPC.
intptr_t *frame::initial_deoptimization_info() {
  // unused... but returns fp() to minimize changes introduced by 7087445
  return fp();
}
439
440
#ifndef PRODUCT
// This is a generic constructor which is only used by pns() in debug.cpp.
// Only sp is taken from the arguments; the 'fp' parameter is ignored here —
// _fp (and _pc, the deopt state, and a possible _unextended_sp adjustment)
// are derived from 'pc' by the helper below.
frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
  find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
}
#endif
446
447