GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
/*
 * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
27
#include "asm/macroAssembler.inline.hpp"
28
#include "c1/c1_Defs.hpp"
29
#include "c1/c1_MacroAssembler.hpp"
30
#include "c1/c1_Runtime1.hpp"
31
#include "ci/ciUtilities.hpp"
32
#include "compiler/oopMap.hpp"
33
#include "gc/shared/cardTable.hpp"
34
#include "gc/shared/cardTableBarrierSet.hpp"
35
#include "interpreter/interpreter.hpp"
36
#include "memory/universe.hpp"
37
#include "nativeInst_s390.hpp"
38
#include "oops/compiledICHolder.hpp"
39
#include "oops/oop.inline.hpp"
40
#include "prims/jvmtiExport.hpp"
41
#include "register_s390.hpp"
42
#include "registerSaver_s390.hpp"
43
#include "runtime/sharedRuntime.hpp"
44
#include "runtime/signature.hpp"
45
#include "runtime/stubRoutines.hpp"
46
#include "runtime/vframeArray.hpp"
47
#include "utilities/macros.hpp"
48
#include "utilities/powerOfTwo.hpp"
49
#include "vmreg_s390.inline.hpp"
50
51
// Implementation of StubAssembler
52
53
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != NULL, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception. However, if the
    // code is relocated, the conditional branch might no longer reach its
    // target, so we branch around an unconditional jump that can always reach.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.

    // exception pending => forward to exception handler

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


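// The overloads below marshal up to three stub arguments into the C calling
// convention: Z_ARG1 always carries the current thread, so stub argument i
// goes into Z_ARG(i+1). The asserts catch an argument register being
// clobbered by one of the earlier moves.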
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE),sasm):sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

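// Note on units: live_reg_frame_size() is in bytes; dividing by
// stack_slot_size yields slots, and set_frame_size() expects words
// (slots / slots_per_word). The save/restore helpers below all follow
// this scheme.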
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

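// The _except_r2 variants leave Z_R2 (= Z_RET) out of the save/restore set:
// allocation stubs return their result in Z_R2, and restoring it would
// overwrite the value produced by the runtime call.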
static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

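// Generates a stub that makes a frame, calls into the VM to raise an
// exception, and records an oop map at the call site. The runtime call
// never returns normally (it always throws), hence should_not_reach_here().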
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are empty.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

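// The patching stubs call into the runtime and then inspect the return value
// in Z_RET: zero means the nmethod is still valid and the patched instruction
// can simply be re-executed; non-zero means the nmethod was deoptimized and
// execution must continue in the deoptimization blob instead.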
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // return value == 0

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and
  // re-execution. If we simply returned, we would deopt as if any call
  // we patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

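// generate_code_for() is the per-platform dispatcher: for each StubID it
// emits the stub's code into sasm and returns the set of oop maps describing
// live oops at the stub's runtime call sites (NULL for stubs that make no
// such calls).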
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case counter_overflow_id:
      {
        // Arguments:
        //   bci    : stack param 0
        //   method : stack param 1
        //
        Register bci = Z_ARG2, method = Z_ARG3;
        // frame size in bytes
        OopMap* map = save_live_registers(sasm);
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2, FILE_AND_LINE);
        __ z_br(Z_R14);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has-finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
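
    // The slow subtype check reports its result in the condition code (CC 0
    // on match, CC 2 on miss) so callers can branch on bcondEqual or
    // bcondNotEqual directly. It therefore saves and restores its temp
    // registers by hand around check_klass_subtype_slow_path() instead of
    // going through RegisterSaver, presumably to keep this path cheap.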
    case slow_subtype_check_id:
      {
        // Arguments:
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result: condition code 0 for match (bcondEqual will be true),
        //         condition code 2 for miss (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
        }

        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save return pc. This is not necessary, but could be helpful
        // in the case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Set CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;
    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine, but we must set up the last Java sp
        // => use call_RT for now (speed can be improved by
        // doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
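
    // The disabled block below is x86 code (rax, rsp, x87 FPU) apparently
    // carried over from the shared template and never ported to s390; the
    // trailing "#endif // TODO" marks it as pending work.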
#if 0
    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi and rcx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100); // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

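// Common exception-handling stub. It is entered in three flavors that differ
// only in how the registers were saved on entry: forward_exception_id (the
// registers were already saved in the standard frame), handle_exception_id /
// handle_exception_nofpu_id (all registers may be live and are saved here),
// and handle_exception_from_callee_id (only Z_EXC_OOP and Z_EXC_PC are live).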
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters: Z_EXC_OOP, Z_EXC_PC

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load pending exception oop into Z_EXC_OOP and clear the field.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (The exception handler will load them from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address. This will be the deopt blob's entry if the
  // nmethod was deoptimized while we looked up the handler, regardless of
  // whether a handler exists in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch(id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2); // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id); // Pops the frame as well.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}