/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_S390_MACROASSEMBLER_S390_HPP
#define CPU_S390_MACROASSEMBLER_S390_HPP

#include "asm/assembler.hpp"
#include "oops/accessDecorators.hpp"

#define MODERN_IFUN(name)  ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_IFUN(name) ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define MODERN_FFUN(name)  ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_FFUN(name) ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  // Move register if destination register and target register are different.
  void lr_if_needed(Register rd, Register rs);
  void lgr_if_needed(Register rd, Register rs);
  void llgfr_if_needed(Register rd, Register rs);
  void ldr_if_needed(FloatRegister rd, FloatRegister rs);

  void move_reg_if_needed(Register dest, BasicType dest_type, Register src, BasicType src_type);
  void move_freg_if_needed(FloatRegister dest, BasicType dest_type, FloatRegister src, BasicType src_type);

  void freg2mem_opt(FloatRegister reg,
                    int64_t disp,
                    Register index,
                    Register base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register scratch = Z_R0);
  void freg2mem_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void mem2freg_opt(FloatRegister reg,
                    int64_t disp,
                    Register index,
                    Register base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register scratch = Z_R0);
  void mem2freg_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void reg2mem_opt(Register reg,
                   int64_t disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                   Register scratch = Z_R0);
  // Returns the offset of the store instruction.
  int reg2mem_opt(Register reg, const Address &a, bool is_double = true);

  void mem2reg_opt(Register reg,
                   int64_t disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register));
  void mem2reg_opt(Register reg, const Address &a, bool is_double = true);
  void mem2reg_signed_opt(Register reg, const Address &a);

  // AND immediate and set condition code. Works for 64-bit immediates/operations as well.
  void and_imm(Register r, long mask, Register tmp = Z_R0, bool wide = false);

  // 1's complement, 32-bit or 64-bit. Optimized to exploit the distinct-operands facility.
  // Note: The condition code is neither preserved nor correctly set by this code!!!
  // Note: (wide == false) does not protect the high order half of the target register
  //       from alteration. It only serves as an optimization hint for 32-bit results.
  void not_(Register r1, Register r2 = noreg, bool wide = false); // r1 = ~r2
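  // Usage sketch (illustrative, assuming noreg selects r1 itself as the source):
  //   not_(Z_R1);             // Z_R1 = ~Z_R1 (low 32 bits guaranteed only)
  //   not_(Z_R1, Z_R2, true); // full 64-bit complement: Z_R1 = ~Z_R2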

  // Expanded support of all "rotate_then_<logicalOP>" instructions.
  //
  // Generalize and centralize rotate_then_<logicalOP> emitter.
  // Functional description. For details, see Principles of Operation, Chapter 7, "Rotate Then Insert..."
  //  - Bits in a register are numbered left (most significant) to right (least significant), i.e. [0..63].
  //  - Bytes in a register are numbered left (most significant) to right (least significant), i.e. [0..7].
  //  - Register src is rotated to the left by (nRotate & 0x3f) positions.
  //  - Negative values for nRotate result in a rotation to the right by abs(nRotate) positions.
  //  - The bits in positions [lBitPos..rBitPos] of the _ROTATED_ src operand take part in the
  //    logical operation performed on the contents (in those positions) of the dst operand.
  //  - The logical operation that is performed on the dst operand is one of
  //     o insert the selected bits (replacing the original contents of those bit positions)
  //     o and the selected bits with the corresponding bits of the dst operand
  //     o or  the selected bits with the corresponding bits of the dst operand
  //     o xor the selected bits with the corresponding bits of the dst operand
  //  - For clear_dst == true, the destination register is cleared before the bits are inserted.
  //    For clear_dst == false, only the bit positions that get data inserted from src
  //    are changed. All other bit positions remain unchanged.
  //  - For test_only == true, the result of the logicalOP is only used to set the condition code; dst remains unchanged.
  //    For test_only == false, the result of the logicalOP replaces the selected bits of dst.
  //  - src32bit and dst32bit indicate that the respective register is used as a 32-bit value only.
  //    This knowledge can simplify code generation.
  //
  // Here is an important performance note, valid for all <logicalOP>s except "insert":
  //   Due to the complex nature of the operation, it cannot be done in a single cycle.
  //   Timing constraints require the instructions to be cracked into two micro-ops, taking
  //   one or two cycles each to execute. In some cases, an additional pipeline bubble might get added.
  //   Macroscopically, that adds up to a three- or four-cycle instruction where you would
  //   expect just a single cycle.
  //   It is thus not beneficial from a performance point of view to exploit those instructions.
  //   Other reasons (code compactness, register pressure, ...) might outweigh this penalty.
  //
  unsigned long create_mask(int lBitPos, int rBitPos);
  void rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                        int nRotate, bool src32bit, bool dst32bit, bool oneBits);
  void rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                          bool clear_dst);
  void rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
  void rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                      bool test_only);
  void rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
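  // Usage sketch (illustrative): extract the byte in bits [32..39] of Z_R2
  // into the low byte of Z_R1 (bits [56..63]), clearing the rest of Z_R1.
  // Rotating right by 24 (nRotate == -24) moves bits [32..39] to [56..63]:
  //   rotate_then_insert(Z_R1, Z_R2, 56, 63, -24, true);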

  void add64(Register r1, RegisterOrConstant inc);

  // Helper function to multiply the 64-bit contents of a register by a 16-bit constant.
  // The optimization tries to avoid the mghi instruction, since it uses the FPU for
  // calculation and is thus rather slow.
  //
  // There is no handling for special cases, e.g. cval==0 or cval==1.
  //
  // Returns the length of the generated code block.
  unsigned int mul_reg64_const16(Register rval, Register work, int cval);
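  // Usage sketch (illustrative): multiply Z_R2 by 10 in place, using Z_R0 as
  // the work register; the return value is the emitted code size in bytes:
  //   unsigned int len = mul_reg64_const16(Z_R2, Z_R0, 10);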

  // Generic operation r1 := r2 + imm.
  void add2reg(Register r1, int64_t imm, Register r2 = noreg);
  // Generic operation r := b + x + d.
  void add2reg_with_index(Register r, int64_t d, Register x, Register b = noreg);
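  // Examples (illustrative, assuming noreg selects r1 itself as the source):
  //   add2reg(Z_R1, 8);        // Z_R1 += 8
  //   add2reg(Z_R1, 8, Z_R2);  // Z_R1 = Z_R2 + 8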

  // Add2mem* methods for direct memory increment.
  void add2mem_32(const Address &a, int64_t imm, Register tmp);
  void add2mem_64(const Address &a, int64_t imm, Register tmp);

  // *((int8_t*)(dst)) |= imm8
  inline void or2mem_8(Address& dst, int64_t imm8);

  // Load values by size and signedness.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);
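  // Usage sketch (field_offset is hypothetical): load a signed 16-bit field
  // into a 64-bit register with sign extension:
  //   load_sized_value(Z_R1, Address(Z_R2, field_offset), 2, true);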

  // Load values with large offsets to base address.
 private:
  int split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate);
 public:
  void load_long_largeoffset(Register t, int64_t si20, Register a, Register tmp);
  void load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);
  void load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);

 private:
  long toc_distance();
 public:
  void load_toc(Register Rtoc);
  void load_long_pcrelative(Register Rdst, address dataLocation);
  static int load_long_pcrelative_size() { return 6; }
  void load_addr_pcrelative(Register Rdst, address dataLocation);
  static int load_addr_pcrel_size() { return 6; } // Just a LARL.

  // Load a value from memory and test (set CC).
  void load_and_test_byte    (Register dst, const Address &a);
  void load_and_test_short   (Register dst, const Address &a);
  void load_and_test_int     (Register dst, const Address &a);
  void load_and_test_int2long(Register dst, const Address &a);
  void load_and_test_long    (Register dst, const Address &a);

  // Test a bit in memory. Result is reflected in CC.
  void testbit(const Address &a, unsigned int bit);
  // Test a bit in a register. Result is reflected in CC.
  void testbit(Register r, unsigned int bitPos);

  void prefetch_read(Address a);
  void prefetch_update(Address a);

  // Clear a register, i.e. load const zero into reg. Returns the length (in bytes) of the
  // generated instruction(s).
  //   whole_reg: Clear 64 bits if true, 32 bits otherwise.
  //   set_cc:    Use an instruction that sets the condition code, if true.
  int clear_reg(Register r, bool whole_reg = true, bool set_cc = true);

#ifdef ASSERT
  int preset_reg(Register r, unsigned long pattern, int pattern_len);
#endif

  // Clear (store zeros) a small piece of memory.
  // CAUTION: Do not use this for atomic memory clearing. Use store_const() instead.
  //   addr: Address descriptor of memory to clear.
  //         The index register will not be used!
  //   size: Number of bytes to clear.
  void clear_mem(const Address& addr, unsigned size);

  // Move immediate values to memory. Currently supports 32- and 64-bit stores,
  // but may be extended to 16-bit store operation, if needed.
  // For details, see the implementation in the *.cpp file.
  int store_const(const Address &dest, long imm,
                  unsigned int lm, unsigned int lc,
                  Register scratch = Z_R0);
  inline int store_const(const Address &dest, long imm,
                         Register scratch = Z_R0, bool is_long = true);
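  // Usage sketch (slot_offset is hypothetical): store a 64-bit zero to a
  // stack slot, using the default scratch register and is_long == true:
  //   store_const(Address(Z_SP, slot_offset), 0);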

  // Move/initialize arbitrarily large memory area. No check for destructive overlap.
  // Being interruptible, these instructions need a retry-loop.
  void move_long_ext(Register dst, Register src, unsigned int pad);

  void compare_long_ext(Register left, Register right, unsigned int pad);
  void compare_long_uni(Register left, Register right, unsigned int pad);

  void search_string(Register end, Register start);
  void search_string_uni(Register end, Register start);

  // Translate instructions.
  // Being interruptible, these instructions need a retry-loop.
  void translate_oo(Register dst, Register src, uint mask);
  void translate_ot(Register dst, Register src, uint mask);
  void translate_to(Register dst, Register src, uint mask);
  void translate_tt(Register dst, Register src, uint mask);

  // Crypto instructions.
  // Being interruptible, these instructions need a retry-loop.
  void cksm(Register crcBuff, Register srcBuff);
  void km( Register dstBuff, Register srcBuff);
  void kmc(Register dstBuff, Register srcBuff);
  void kimd(Register srcBuff);
  void klmd(Register srcBuff);
  void kmac(Register srcBuff);

  // nop padding
  void align(int modulus);
  void align_address(int modulus);

  //
  // Constants, loading constants, TOC support
  //

  // Load generic address: d <- base(a) + index(a) + disp(a).
  inline void load_address(Register d, const Address &a);
  // Load absolute address (and try to optimize).
  void load_absolute_address(Register d, address addr);

  // Address of Z_ARG1 and argument_offset.
  // If temp_reg == arg_slot, arg_slot will be overwritten.
  Address argument_address(RegisterOrConstant arg_slot,
                           Register temp_reg = noreg,
                           int64_t extra_slot_offset = 0);

  // Load a narrow ptr constant (oop or klass ptr).
  void load_narrow_oop( Register t, narrowOop a);
  void load_narrow_klass(Register t, Klass* k);

  static bool is_load_const_32to64(address pos);
  static bool is_load_narrow_oop(address pos)   { return is_load_const_32to64(pos); }
  static bool is_load_narrow_klass(address pos) { return is_load_const_32to64(pos); }

  static int load_const_32to64_size()  { return 6; }
  static int load_narrow_oop_size()    { return load_const_32to64_size(); }
  static int load_narrow_klass_size()  { return load_const_32to64_size(); }

  static int patch_load_const_32to64(address pos, int64_t a);
  static int patch_load_narrow_oop(address pos, oop o);
  static int patch_load_narrow_klass(address pos, Klass* k);

  // cOops. CLFI exploit.
  void compare_immediate_narrow_oop(Register oop1, narrowOop oop2);
  void compare_immediate_narrow_klass(Register op1, Klass* op2);
  static bool is_compare_immediate32(address pos);
  static bool is_compare_immediate_narrow_oop(address pos);
  static bool is_compare_immediate_narrow_klass(address pos);
  static int compare_immediate_narrow_size()       { return 6; }
  static int compare_immediate_narrow_oop_size()   { return compare_immediate_narrow_size(); }
  static int compare_immediate_narrow_klass_size() { return compare_immediate_narrow_size(); }
  static int patch_compare_immediate_32(address pos, int64_t a);
  static int patch_compare_immediate_narrow_oop(address pos, oop o);
  static int patch_compare_immediate_narrow_klass(address pos, Klass* k);

  // Load a 32-bit constant into a 64-bit register.
  void load_const_32to64(Register t, int64_t x, bool sign_extend = true);
  // Load a 64-bit constant.
  void load_const(Register t, long a);
  inline void load_const(Register t, void* a);
  inline void load_const(Register t, Label& L);
  inline void load_const(Register t, const AddressLiteral& a);
  // Get the 64-bit constant from a `load_const' sequence.
  static long get_const(address load_const);
  // Patch the 64-bit constant of a `load_const' sequence. This is a low-level
  // procedure. It neither flushes the instruction cache nor is it atomic.
  static void patch_const(address load_const, long x);
  static int load_const_size() { return 12; }
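  // Typical patching sequence (a sketch; `value' stands for the later-known
  // constant). load_const emits a fixed-length 12-byte sequence, so the
  // placeholder can be rewritten in place once the real value is available:
  //   address loc = pc();
  //   load_const(Z_R1, (long)0);   // fixed-length placeholder
  //   ...
  //   patch_const(loc, value);     // rewrite the embedded 64-bit constant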

  // Turn a char into boolean. NOTE: destroys r.
  void c2bool(Register r, Register t = Z_R0);

  // Optimized version of load_const for constants that do not need to be
  // loaded by a sequence of instructions of fixed length and that do not
  // need to be patched.
  int load_const_optimized_rtn_len(Register t, long x, bool emit);
  inline void load_const_optimized(Register t, long x);
  inline void load_const_optimized(Register t, void* a);
  inline void load_const_optimized(Register t, Label& L);
  inline void load_const_optimized(Register t, const AddressLiteral& a);

 public:

  //----------------------------------------------------------
  //  oops in code                        -------------
  //  including compressed oops support   -------------
  //----------------------------------------------------------

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index

  // allocate_index
  AddressLiteral allocate_oop_address(jobject obj);
  // find_index
  AddressLiteral constant_oop_address(jobject obj);
  // Uses allocate_oop_address.
  inline void set_oop(jobject obj, Register d);
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Uses constant_metadata_address.
  inline bool set_metadata_constant(Metadata* md, Register d);

  //
  // branch, jump
  //

  // Use one generic function for all branch patches.
  static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);

  void pd_patch_instruction(address branch, address target, const char* file, int line);

  // Extract relative address from "relative" instructions.
  static long get_pcrel_offset(unsigned long inst);
  static long get_pcrel_offset(address pc);
  static address get_target_addr_pcrel(address pc);

  static inline bool is_call_pcrelative_short(unsigned long inst);
  static inline bool is_call_pcrelative_long(unsigned long inst);
  static inline bool is_branch_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative_long(unsigned long inst);
  static inline bool is_compareandbranch_pcrelative_short(unsigned long inst);
  static inline bool is_branchoncount_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex32_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex64_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative16(unsigned long inst);
  static inline bool is_branch_pcrelative32(unsigned long inst);
  static inline bool is_branch_pcrelative(unsigned long inst);
  static inline bool is_load_pcrelative_long(unsigned long inst);
  static inline bool is_misc_pcrelative_long(unsigned long inst);
  static inline bool is_pcrelative_short(unsigned long inst);
  static inline bool is_pcrelative_long(unsigned long inst);
  // PCrelative TOC access. Variants with address argument.
  static inline bool is_load_pcrelative_long(address iLoc);
  static inline bool is_pcrelative_short(address iLoc);
  static inline bool is_pcrelative_long(address iLoc);

  static inline bool is_pcrelative_instruction(address iloc);
  static inline bool is_load_addr_pcrel(address a);

  static void patch_target_addr_pcrel(address pc, address con);
  static void patch_addr_pcrel(address pc, address con) {
    patch_target_addr_pcrel(pc, con); // Just delegate. This is only for nativeInst_s390.cpp.
  }

  //---------------------------------------------------------
  // Some macros for more comfortable assembler programming.
  //---------------------------------------------------------

  // NOTE: pass a NearLabel T to signal that the branch target T will be bound to a near address.

  void compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
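  // Usage sketch (illustrative; condition-code names per this port's Assembler):
  //   NearLabel done;
  //   compare32_and_branch(Z_R2, RegisterOrConstant((intptr_t)0), bcondEqual, done);
  //   ...
  //   bind(done);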

  void branch_optimized(Assembler::branch_condition cond, address branch_target);
  void branch_optimized(Assembler::branch_condition cond, Label& branch_target);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    address branch_addr,
                                    bool len64,
                                    bool has_sign);
  void compare_and_branch_optimized(Register r1,
                                    jlong x2,
                                    Assembler::branch_condition cond,
                                    Label& branch_target,
                                    bool len64,
                                    bool has_sign);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    Label& branch_target,
                                    bool len64,
                                    bool has_sign);

  //
  // Support for frame handling
  //
  // Specify the register that should be stored as the return pc in the
  // current frame (default is R14).
  inline void save_return_pc(Register pc = Z_R14);
  inline void restore_return_pc();

  // Get current PC.
  address get_PC(Register result);

  // Get current PC + offset. Offset given in bytes, must be even!
  address get_PC(Register result, int64_t offset);

  // Get size of instruction at pc (which must point to valid code).
  void instr_size(Register size, Register pc);

  // Accessing, and in particular modifying, a stack location is only safe if
  // the stack pointer (Z_SP) is set such that the accessed stack location is
  // in the reserved range.
  //
  // From a performance point of view, it is desirable not to change the SP
  // first and then immediately use it to access the freshly reserved space.
  // That opens a small gap, though. If, just after storing some value (the
  // frame pointer) into the to-be-reserved space, an interrupt is caught,
  // the handler might use the space beyond Z_SP for its own purposes.
  // If that happens, the stored value might get altered.

  // Resize the current frame, either relative to the current SP or to an absolute address.
  void resize_frame_sub(Register offset, Register fp, bool load_fp = true);
  void resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp);
  void resize_frame_absolute(Register addr, Register fp, bool load_fp);
  void resize_frame(RegisterOrConstant offset, Register fp, bool load_fp = true);

  // Push a frame of size bytes. If copy_sp is false, old_sp must already
  // contain a copy of Z_SP.
  void push_frame(Register bytes, Register old_sp, bool copy_sp = true, bool bytes_with_inverted_sign = false);

  // Push a frame of size `bytes'. No abi space provided.
  // Don't rely on register locking, instead pass a scratch register
  // (Z_R0 by default).
  // CAUTION! Passing registers >= Z_R2 may produce bad results on
  // old CPUs!
  unsigned int push_frame(unsigned int bytes, Register scratch = Z_R0);

  // Push a frame of size `bytes' with abi160 on top.
  unsigned int push_frame_abi160(unsigned int bytes);

  // Pop current C frame.
  void pop_frame();
  // Pop current C frame and restore the return PC register (Z_R14).
  void pop_frame_restore_retPC(int frame_size_in_bytes);
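  // Typical pairing (a sketch): spill the return pc, push a frame with the
  // z/Architecture ABI area on top, and unwind both again on exit:
  //   save_return_pc();                           // store Z_R14 into the caller's frame
  //   unsigned int bytes = push_frame_abi160(0);  // returns the pushed frame size
  //   ...
  //   pop_frame();
  //   restore_return_pc();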

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

 public:
  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);
  void call_VM_leaf_base(address entry_point, bool allow_relocation);

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is setup
  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).

  // If no last_java_sp is specified (noreg), then SP will be used instead.

  virtual void call_VM_base(
    Register oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address  entry_point,       // The entry point.
    bool     check_exception);  // Flag which indicates if exception should be checked.
  virtual void call_VM_base(
    Register oop_result,         // Where an oop-result ends up if any; use noreg otherwise.
    Register last_java_sp,       // To set up last_Java_frame in stubs; use noreg otherwise.
    address  entry_point,        // The entry point.
    bool     allow_relocation,   // Flag to request generation of relocatable code.
    bool     check_exception);   // Flag which indicates if exception should be checked.

  // Call into the VM.
  // Passes the thread pointer (in Z_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
               Register arg_3, bool check_exceptions = true);

  void call_VM_static(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                      Register arg_3, bool check_exceptions = true);

  // Overloaded with last_java_sp.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
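  // Usage sketch (`entry' is a hypothetical runtime entry point): call with
  // one argument and an oop result; the thread pointer is prepended as Z_ARG1:
  //   call_VM(Z_RET, entry, Z_ARG2);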

  // Really static VM leaf call (never patched).
  void call_VM_leaf_static(address entry_point);
  void call_VM_leaf_static(address entry_point, Register arg_1);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Call a C function via its function entry. Updates and returns _last_calls_return_pc.
  inline address call(Register function_entry);
  inline address call_c(Register function_entry);
  address call_c(address function_entry);
  // Variant for really static (non-relocatable) calls which are never patched.
  address call_c_static(address function_entry);
  // TOC or pc-relative call + emits a runtime_call relocation.
  address call_c_opt(address function_entry);

  inline address call_stub(Register function_entry);
  inline address call_stub(address function_entry);

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

 private:
  static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
  static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.


 public:
  bool call_far_patchable(address target, int64_t toc_offset);
  static bool is_call_far_patchable_at(address inst_start);            // All supported forms of patchable calls.
  static bool is_call_far_patchable_pcrelative_at(address inst_start); // Pc-relative call with leading nops.
  static bool is_call_far_pcrelative(address instruction_addr);        // Pure far pc-relative call, with one leading size adjustment nop.
  static void set_dest_of_call_far_patchable_at(address inst_start, address target, int64_t toc_offset);
  static address get_dest_of_call_far_patchable_at(address inst_start, address toc_start);

  void align_call_far_patchable(address pc);

  // PCrelative TOC access.

  // This value is independent of code position - constant for the lifetime of the VM.
  static int call_far_patchable_size() {
    return load_const_from_toc_size() + call_byregister_size();
  }

  static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }

  static bool call_far_patchable_requires_alignment_nop(address pc) {
    int size = call_far_patchable_size();
    return ((intptr_t)(pc + size) & 0x03L) != 0;
  }

  // END OF PCrelative TOC access.

  static int jump_byregister_size()         { return 2; }
  static int jump_pcrelative_size()         { return 4; }
  static int jump_far_pcrelative_size()     { return 6; }
  static int call_byregister_size()         { return 2; }
  static int call_pcrelative_size()         { return 4; }
  static int call_far_pcrelative_size()     { return 2 + 6; } // Prepend each BRASL with a nop.
  static int call_far_pcrelative_size_raw() { return 6; }     // Size of the BRASL alone, without the leading nop.

  //
  // Java utilities
  //

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Polling page support.
  enum poll_mask {
    mask_stackbang = 0xde, // 222 (dec)
    mask_safepoint = 0x6f, // 111 (dec)
    mask_profiling = 0xba  // 186 (dec)
  };

  // Read from the polling page.
  void load_from_polling_page(Register polling_page_address, int64_t offset = 0);

  // Check if given instruction is a read from the polling page
  // as emitted by load_from_polling_page.
  static bool is_load_from_polling_page(address instr_loc);
  // Extract poll address from instruction and ucontext.
  static address get_poll_address(address instr_loc, void* ucontext);
  // Extract poll register from instruction.
  static uint get_poll_register(address instr_loc);

  // Check if a safepoint was requested and, if so, branch to slow_path.
  void safepoint_poll(Label& slow_path, Register temp_reg);
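  // Usage sketch (illustrative): fast-path safepoint check; the slow path is
  // bound elsewhere and calls into the runtime:
  //   Label slow;
  //   safepoint_poll(slow, Z_R1);
  //   ... // fast path continues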

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // Check for reserved stack access in the method being exited. If the reserved
  // stack area was accessed, protect it again and throw StackOverflowError.
  // Uses Z_R1.
  void reserved_stack_check(Register return_pc);

  // Atomics
  // -- none?

  void tlab_allocate(Register obj,                // Result: pointer to object after successful allocation.
                     Register var_size_in_bytes,  // Object size in bytes if unknown at compile time; invalid otherwise.
                     int      con_size_in_bytes,  // Object size in bytes if known at compile time.
                     Register t1,                 // Temp register.
                     Label&   slow_case);         // Continuation point if fast allocation fails.

  // Emitter for interface method lookup.
  //   input:  recv_klass, intf_klass, itable_index
  //   output: method_result
  //   kills:  itable_index, temp1_reg, Z_R0, Z_R1
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp1_reg,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Factor out code to call ic_miss_handler.
  unsigned int call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch);
  void nmethod_UEP(Label& ic_miss);

  // Emitters for "partial subtype" checks.

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp1_reg.
  // If super_check_offset is not -1, temp1_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register Rsubklass,
                                     Register Rsuperklass,
                                     Register Rarray_ptr, // tmp
                                     Register Rlength,    // tmp
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label& L_success);
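  // Usage sketch (illustrative): guarded cast; execution falls through to the
  // failure handling when sub_klass is not a subtype of super_klass:
  //   Label is_subtype;
  //   check_klass_subtype(Z_R2, Z_R3, Z_R4, Z_R5, is_subtype);
  //   ... // failure path (e.g. branch to a throw stub)
  //   bind(is_subtype);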

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // Increment a counter at counter_address when the eq condition code is set.
  // Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
  void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  void biased_locking_exit(Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
  void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

  // Support for last Java frame (but use call_VM instead where possible).
 private:
  void set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation);
  void reset_last_Java_frame(bool allow_relocation);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation);
 public:
  inline void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  inline void set_last_Java_frame_static(Register last_java_sp, Register last_Java_pc);
  inline void reset_last_Java_frame(void);
  inline void reset_last_Java_frame_static(void);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame_static(Register sp, Register tmp1);

  void set_thread_state(JavaThreadState new_state);

  // Read vm result from thread.
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register result);

  // Vm result is currently getting hijacked for oop preservation.
  void set_vm_result(Register oop_result);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).
  //
  // %%%%%% Currently not done for z/Architecture

  void null_check(Register reg, Register tmp = Z_R0, int64_t offset = -1);
  static bool needs_explicit_null_check(intptr_t offset); // Implemented in shared file ?!
  static bool uses_implicit_null_check(void* address);

  // Klass oop manipulations if compressed.
  void encode_klass_not_null(Register dst, Register src = noreg);
  void decode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst);
  void load_klass(Register klass, Address mem);
  void load_klass(Register klass, Register src_oop);
  void load_prototype_header(Register Rheader, Register Rsrc_oop);
  void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
  void store_klass_gap(Register s, Register dst_oop);

  // This function calculates the size of the code generated by
  //   decode_klass_not_null(register dst)
  // when (Universe::heap() != NULL). Hence, if the instructions
  // it generates change, then this method needs to be updated.
  static int instr_size_for_decode_klass_not_null();

  void encode_heap_oop(Register oop);
  void encode_heap_oop_not_null(Register oop);

  static int get_oop_base_pow2_offset(uint64_t oop_base);
  int  get_oop_base(Register Rbase, uint64_t oop_base);
  int  get_oop_base_complement(Register Rbase, uint64_t oop_base);
  void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL);
  void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL);

  // Access heap oop, handle encoding and GC barriers.
 private:
  void access_store_at(BasicType type, DecoratorSet decorators,
                       const Address& addr, Register val,
                       Register tmp1, Register tmp2, Register tmp3);
  void access_load_at(BasicType type, DecoratorSet decorators,
                      const Address& addr, Register dst,
                      Register tmp1, Register tmp2, Label *is_null = NULL);

 public:
  // tmp1 and tmp2 are used with decorators ON_PHANTOM_OOP_REF or ON_WEAK_OOP_REF.
  void load_heap_oop(Register dest, const Address &a,
                     Register tmp1, Register tmp2,
                     DecoratorSet decorators = 0, Label *is_null = NULL);
  void store_heap_oop(Register Roop, const Address &a,
                      Register tmp1, Register tmp2, Register tmp3,
                      DecoratorSet decorators = 0);
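  // Usage sketch (field_offset is hypothetical): load an oop field with the
  // default decorator set; tmp1/tmp2 only matter for weak/phantom references:
  //   load_heap_oop(Z_R2, Address(Z_R3, field_offset), Z_R0, Z_R1);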

  void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false);
  void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1);

  void resolve_oop_handle(Register result);
  void load_mirror_from_const_method(Register mirror, Register const_method);
  void load_method_holder(Register holder, Register method);

  //--------------------------
  //---  Operations on arrays.
  //--------------------------
  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg);
  unsigned int Clear_Array_Const(long cnt, Register base);
  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg);
  unsigned int CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                             Register cnt_reg,
                                             Register tmp1_reg, Register tmp2_reg);


  // Emit an oop const to the constant pool and set a relocation info
  // with address current_pc. Return the TOC offset of the constant.
  int store_const_in_toc(AddressLiteral& val);
  int store_oop_in_toc(AddressLiteral& oop);
  // Emit an oop const to the constant pool via store_oop_in_toc, or
  // emit a scalar const to the constant pool via store_const_in_toc,
  // and load the constant into register dst.
  bool load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  // Get CPU-version-dependent size of the load_const sequence.
  // The returned value is valid only for code sequences
  // generated by load_const, not load_const_optimized.
  static int load_const_from_toc_size() {
    return load_long_pcrelative_size();
  }
  bool load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  static intptr_t get_const_from_toc(address pc);
  static void set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb);

  // Dynamic TOC.
  static bool is_load_const(address a);
  static bool is_load_const_from_toc_pcrelative(address a);
  static bool is_load_const_from_toc(address a) { return is_load_const_from_toc_pcrelative(a); }

  // PCrelative TOC access.
  static bool is_call_byregister(address a) { return is_z_basr(*(short*)a); }
  static bool is_load_const_from_toc_call(address a);
  static bool is_load_const_call(address a);
  static int load_const_call_size()          { return load_const_size() + call_byregister_size(); }
  static int load_const_from_toc_call_size() { return load_const_from_toc_size() + call_byregister_size(); }
  // Offset is +/- 2**32 -> use long.
  static long get_load_const_from_toc_offset(address a);

  // Bit operations for single register operands.
  inline void lshift(Register r, int places, bool doubl = true); // <<
  inline void rshift(Register r, int places, bool doubl = true); // >>
  //
  // Debugging
  //

  // Assert on CC (condition code in CPU state).
  void asm_assert(bool check_equal, const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_low(const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_high(const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_eq(const char* msg, int id) { asm_assert(true,  msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

  void asm_assert_static(bool check_equal, const char* msg, int id) PRODUCT_RETURN;

 private:
  // Emit assertions.
  void asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                            Register mem_base, const char* msg, int id) PRODUCT_RETURN;

 public:
  inline void asm_assert_mem4_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 8, mem_offset, mem_base, msg, id);
  }

  inline void asm_assert_mem4_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) PRODUCT_RETURN;

  // Verify Z_thread contents.
  void verify_thread();

  // Save and restore functions: Exclude Z_R0.
  void save_volatile_regs(   Register dst, int offset, bool include_fp, bool include_flags);
  void restore_volatile_regs(Register src, int offset, bool include_fp, bool include_flags);

  // Only if +VerifyOops.
  // Kills Z_R0.
  void verify_oop(Register reg, const char* s = "broken oop");
  // Kills Z_R0, condition code.
  void verify_oop_addr(Address addr, const char* msg = "contains broken oop");

  // TODO: verify_method and klass metadata (compare against vptr?).
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg)  _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:
  // Generate printout in stop().
  static const char* stop_types[];
  enum {
    stop_stop               = 0,
    stop_untested           = 1,
    stop_unimplemented      = 2,
    stop_shouldnotreachhere = 3,
    stop_end                = 4
  };
  // Prints msg and stops execution.
  void    stop(int type, const char* msg, int id = 0);
  address stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation); // Non-relocatable code only!!
  void    stop_static(int type, const char* msg, int id);                                        // Non-relocatable code only!!

 public:

  // Prints msg and stops.
  address stop_chain(       address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, true); }
  address stop_chain_static(address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, false); }
  void stop_static  (const char* msg = "", int id = 0) { stop_static(stop_stop, msg, id); }
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
  void should_not_reach_here(const char* msg = "", int id = -1) { stop(stop_shouldnotreachhere, msg, id); }

  // Factor out part of stop into a subroutine to save space.
  void stop_subroutine();

  // Prints msg, but doesn't stop.
  void warn(const char* msg);

  //-----------------------------
  //--- basic block tracing code
  //-----------------------------
  void trace_basic_block(uint i);
  void init_basic_block_trace();
  // Number of bytes a basic block gets larger due to the tracing code macro (worst case).
  // Currently, the worst case is 48 bytes. 64 puts us securely on the safe side.
  static int basic_blck_trace_blk_size_incr() { return 64; }

  // Write pattern 0x0101010101010101 in region [low-before, high+after].
  // Low and high may be the same registers. Before and after are
  // the numbers of 8-byte words.
  void zap_from_to(Register low, Register high, Register tmp1 = Z_R0, Register tmp2 = Z_R1,
                   int before = 0, int after = 0) PRODUCT_RETURN;

  // Emitters for CRC32 calculation.
  // A note on invertCRC:
  //   Unfortunately, the internal representation of the crc value differs between CRC32 and CRC32C.
  //   CRC32 holds its current crc value in the externally visible representation.
  //   CRC32C holds its current crc value in internal format, ready for updating.
  //   Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
  //   In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
  //   The bool invertCRC parameter indicates whether bit-flipping is required before updates.
 private:
  void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void update_byte_crc32( Register crc, Register val, Register table);
  void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
                             Register data);
  void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                          Register t0, Register t1, Register t2, Register t3);
 public:
  void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
                                  bool invertCRC);
  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
                               bool invertCRC);
  void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                          Register t0, Register t1, Register t2, Register t3,
                          bool invertCRC);
  void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                          Register t0, Register t1, Register t2, Register t3,
                          bool invertCRC);

  // Emitters for the BigInteger.multiplyToLen intrinsic.
  // Note: the length of the result array (zlen) is passed on the stack.
 private:
  void add2_with_carry(Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart,
                             Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_loop(Register x_xstart,
                               Register y, Register z,
                               Register yz_idx, Register idx,
                               Register jdx,
                               Register carry, Register product,
                               Register carry2);
 public:
  void multiply_to_len(Register x, Register xlen,
                       Register y, Register ylen,
                       Register z,
                       Register tmp1, Register tmp2,
                       Register tmp3, Register tmp4, Register tmp5);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label           _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register _rscratch);
  ~SkipIfEqual();
};
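// Usage sketch (flag and scratch register are illustrative): the guarded code
// is jumped over at run time whenever *flag_addr equals the given value:
//   {
//     SkipIfEqual skip(_masm, &DTraceMethodProbes, false, Z_R1);
//     // probe code here runs only when DTraceMethodProbes is true
//   }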

#ifdef ASSERT
// Return false (e.g. important for our impl. of virtual calls).
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

#endif // CPU_S390_MACROASSEMBLER_S390_HPP