GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

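// Load a doubleword from a + si31, where si31 is a non-negative 31-bit offset.
// Offsets that do not fit into 16 bits are split into an addis/ld pair that uses
// d as the intermediate base register. If emit_filler_nop is set, the short form
// is padded with a nop so that both forms occupy two instructions.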
void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
  if (Assembler::is_simm(si31, 16)) {
    ld(d, si31, a);
    if (emit_filler_nop) nop();
  } else {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
    addis(d, a, hi);
    ld(d, lo, d);
  }
}

void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
  assert_different_registers(d, a);
  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
}

void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                      size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case 8:             ld(dst, offs, base);                         break;
  case 4: is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
  case 2: is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
  case 1: lbz(dst, offs, base); if (is_signed) extsb(dst, dst);    break; // lba doesn't exist :(
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                       size_t size_in_bytes) {
  switch (size_in_bytes) {
  case 8: std(dst, offs, base); break;
  case 4: stw(dst, offs, base); break;
  case 2: sth(dst, offs, base); break;
  case 1: stb(dst, offs, base); break;
  default: ShouldNotReachHere();
  }
}

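// Pad with nops until offset() % modulus == rem (all values in bytes).
// If more than max bytes of padding would be required, emit nothing.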
void MacroAssembler::align(int modulus, int max, int rem) {
  int padding = (rem + modulus - (offset() % modulus)) % modulus;
  if (padding > max) return;
  for (int c = (padding >> 2); c > 0; --c) { nop(); }
}

// Issue instructions that calculate given TOC from global TOC.
void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
                                                        bool add_relocation, bool emit_dummy_addr) {
  int offset = -1;
  if (emit_dummy_addr) {
    offset = -128; // dummy address
  } else if (addr != (address)(intptr_t)-1) {
    offset = MacroAssembler::offset_to_global_toc(addr);
  }

  if (hi16) {
    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
  }
  if (lo16) {
    if (add_relocation) {
      // Relocate at the addi to avoid confusion with a load from the method's TOC.
      relocate(internal_word_Relocation::spec(addr));
    }
    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
  }
}

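// Patch the addis/addi pair emitted by calculate_address_from_global_toc (the
// relocation points at the addi) so that it computes addr. The matching addis is
// searched backwards from the addi, but not beyond bound. Returns the distance
// in bytes from the addis to addr.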
int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
  const int offset = MacroAssembler::offset_to_global_toc(addr);

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
}

address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // stop, found the addis which writes dst
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");

  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
  // -1 is a special case
  if (offset == -1) {
    return (address)(intptr_t)-1;
  } else {
    return global_toc() + offset;
  }
}

#ifdef _LP64
// Patch compressed oops or klass constants.
// Assembler sequence is
// 1) compressed oops:
//    lis rx = const.hi
//    ori rx = rx | const.lo
// 2) compressed klass:
//    lis rx = const.hi
//    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
//    ori rx = rx | const.lo
// Clrldi will be passed by.
int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;
  while (inst1_addr >= bound) {
    inst1 = *(int *)inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  int xc = (data >> 16) & 0xffff;
  int xd = (data >> 0) & 0xffff;

  set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
  set_imm((int *)inst2_addr, (xd)); // unsigned int
  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
}

// Get compressed oop or klass constant.
narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;

  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break;}
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
  uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);

  return (int) (xl | xh);
}
#endif // _LP64

void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
  int toc_offset = 0;
  // Use RelocationHolder::none for the constant pool entry, otherwise
  // we will end up with a failing NativeCall::verify(x) where x is
  // the address of the constant pool entry.
  // FIXME: We should insert relocation information for oops at the constant
  // pool entries instead of inserting it at the loads; patching of a constant
  // pool entry should be less expensive.
  address oop_address = address_constant((address)a.value(), RelocationHolder::none);
  // Relocate at the pc of the load.
  relocate(a.rspec());
  toc_offset = (int)(oop_address - code()->consts()->start());
  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
}

bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  // The relocation points to the ld or the addis.
  return (is_ld(inst1)) ||
         (is_addis(inst1) && inv_ra_field(inst1) != 0);
}

int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");

  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else if (is_addis(inst1)) {
    const int dst = inv_rt_field(inst1);

    // Now, find the succeeding ld which reads and writes to dst.
    address inst2_addr = inst1_addr + BytesPerInstWord;
    int inst2 = 0;
    while (true) {
      inst2 = *(int *) inst2_addr;
      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
        // Stop, found the ld which reads and writes dst.
        break;
      }
      inst2_addr += BytesPerInstWord;
    }
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
  ShouldNotReachHere();
  return 0;
}

// Get the constant from a `load_const' sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const_at(a), "not a load of a constant");
  const int *p = (const int*) a;
  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
  if (is_ori(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
  } else if (is_lis(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
  } else {
    ShouldNotReachHere();
    return (long) 0;
  }
  return (long) x;
}

// Patch the 64 bit constant of a `load_const' sequence. This is a low
// level procedure. It neither flushes the instruction cache nor is it
// mt safe.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const_at(a), "not a load of a constant");
  int *p = (int*) a;
  if (is_ori(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(1 + p, (x >> 32) & 0xffff);
    set_imm(3 + p, (x >> 16) & 0xffff);
    set_imm(4 + p, x & 0xffff);
  } else if (is_lis(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(2 + p, (x >> 32) & 0xffff);
    set_imm(1 + p, (x >> 16) & 0xffff);
    set_imm(3 + p, x & 0xffff);
  } else {
    ShouldNotReachHere();
  }
}

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp, int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  // Load indirectly to solve generation ordering problem.
  // static address, no relocation
  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)

  if (offset != 0) {
    addi(tmp, tmp, offset);
  }

  return RegisterOrConstant(tmp);
}

#ifndef PRODUCT
void MacroAssembler::pd_print_patched_instruction(address branch) {
  Unimplemented(); // TODO: PPC port
}
#endif // ndef PRODUCT

// Conditional far branch for destinations encodable in 24+2 bits.
void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {

  // If requested by flag optimize, relocate the bc_far as a
  // runtime_call and prepare for optimizing it when the code gets
  // relocated.
  if (optimize == bc_far_optimize_on_relocate) {
    relocate(relocInfo::runtime_call_type);
  }

  // variant 2:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //

  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                opposite_bcond(inv_boint_bcond(boint)));

  // We emit two branches.
  // First, a conditional branch which jumps around the far branch.
  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
  const address bc_pc = pc();
  bc(opposite_boint, biint, not_taken_pc);

  const int bc_instr = *(int*)bc_pc;
  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
         "postcondition");
  assert(biint == inv_bi_field(bc_instr), "postcondition");

  // Second, an unconditional far branch which jumps to dest.
  // Note: target(dest) remembers the current pc (see CodeSection::target)
  // and returns the current pc if the label is not bound yet; when
  // the label gets bound, the unconditional far branch will be patched.
  const address target_pc = target(dest);
  const address b_pc = pc();
  b(target_pc);

  assert(not_taken_pc == pc(), "postcondition");
  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
}

bool MacroAssembler::is_bc_far_at(address instruction_addr) {
  return is_bc_far_variant1_at(instruction_addr) ||
         is_bc_far_variant2_at(instruction_addr) ||
         is_bc_far_variant3_at(instruction_addr);
}

address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
  if (is_bc_far_variant1_at(instruction_addr)) {
    const address instruction_1_addr = instruction_addr;
    const int instruction_1 = *(int*)instruction_1_addr;
    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
  } else if (is_bc_far_variant2_at(instruction_addr)) {
    const address instruction_2_addr = instruction_addr + 4;
    return bxx_destination(instruction_2_addr);
  } else if (is_bc_far_variant3_at(instruction_addr)) {
    return instruction_addr + 8;
  }
  // variant 4 ???
  ShouldNotReachHere();
  return NULL;
}
void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {

  if (is_bc_far_variant3_at(instruction_addr)) {
    // variant 3, far cond branch to the next instruction, already patched to nops:
    //
    //    nop
    //    endgroup
    //  SKIP/DEST:
    //
    return;
  }

  // first, extract boint and biint from the current branch
  int boint = 0;
  int biint = 0;

  ResourceMark rm;
  const int code_size = 2 * BytesPerInstWord;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
    masm.nop();
    masm.endgroup();
  } else {
    if (is_bc_far_variant1_at(instruction_addr)) {
      // variant 1, the 1st instruction contains the destination address:
      //
      //    bcxx  DEST
      //    endgroup
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = inv_bo_field(instruction_1);
      biint = inv_bi_field(instruction_1);
    } else if (is_bc_far_variant2_at(instruction_addr)) {
      // variant 2, the 2nd instruction contains the destination address:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
                                 opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
      biint = inv_bi_field(instruction_1);
    } else {
      // variant 4???
      ShouldNotReachHere();
    }

    // second, set the new branch destination and optimize the code
    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
      // variant 1:
      //
      //    bcxx  DEST
      //    endgroup
      //
      masm.bc(boint, biint, dest);
      masm.endgroup();
    } else {
      // variant 2:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                    opposite_bcond(inv_boint_bcond(boint)));
      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
      masm.bc(opposite_boint, biint, not_taken_pc);
      masm.b(dest);
    }
  }
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Emit a NOT mt-safe patchable 64 bit absolute call/jump.
void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
  // get current pc
  uint64_t start_pc = (uint64_t) pc();

  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first

  // relocate here
  if (rt != relocInfo::none) {
    relocate(rt);
  }

  if ( ReoptimizeCallSequences &&
       (( link && is_within_range_of_b(dest, pc_of_bl)) ||
        (!link && is_within_range_of_b(dest, pc_of_b)))) {
    // variant 2:
    // Emit an optimized, pc-relative call/jump.

    if (link) {
      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();

      // do the call
      assert(pc() == pc_of_bl, "just checking");
      bl(dest, relocInfo::none);
    } else {
      // do the jump
      assert(pc() == pc_of_b, "just checking");
      b(dest, relocInfo::none);

      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();
    }

    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
           "can't identify emitted call");
  } else {
    // variant 1:
    mr(R0, R11); // spill R11 -> R0.

    // Load the destination address into CTR,
    // calculate destination relative to global toc.
    calculate_address_from_global_toc(R11, dest, true, true, false);

    mtctr(R11);
    mr(R11, R0); // spill R11 <- R0.
    nop();

    // do the call/jump
    if (link) {
      bctrl();
    } else {
      bctr();
    }
    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
           "can't identify emitted call");
  }

  // Assert that we can identify the emitted call/jump.
  assert(is_bxx64_patchable_at((address)start_pc, link),
         "can't identify emitted call");
  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
         "wrong encoding of dest address");
}

// Identify a bxx64_patchable instruction.
bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
      || is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Does the call64_patchable instruction use a pc-relative encoding of
// the call destination?
bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
  // variant 2 is pc-relative
  return is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Identify variant 1.
bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[5]) // mtctr
      && is_load_const_at(instruction_addr);
}

// Identify variant 1b: load destination relative to global toc.
bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[3]) // mtctr
      && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
}

// Identify variant 2.
bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  if (link) {
    return is_bl (instr[6])  // bl dest is last
        && is_nop(instr[0])  // nop
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5]); // nop
  } else {
    return is_b (instr[0])   // b dest is first
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5])  // nop
        && is_nop(instr[6]); // nop
  }
}

// Set dest address of a bxx64_patchable instruction.
void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
  ResourceMark rm;
  int code_size = MacroAssembler::bxx64_patchable_size;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.bxx64_patchable(dest, relocInfo::none, link);
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Get dest address of a bxx64_patchable instruction.
address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
    return (address) (unsigned long) get_const(instruction_addr);
  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
    unsigned int* instr = (unsigned int*) instruction_addr;
    if (link) {
      const int instr_idx = 6; // bl is last
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    } else {
      const int instr_idx = 0; // b is first
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    }
  // Load dest relative to global toc.
  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
                                                               instruction_addr);
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

// Uses ordering which corresponds to ABI:
//    _savegpr0_14: std r14,-144(r1)
//    _savegpr0_15: std r15,-136(r1)
//    _savegpr0_16: std r16,-128(r1)
void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
  std(R14, offset, dst); offset += 8;
  std(R15, offset, dst); offset += 8;
  std(R16, offset, dst); offset += 8;
  std(R17, offset, dst); offset += 8;
  std(R18, offset, dst); offset += 8;
  std(R19, offset, dst); offset += 8;
  std(R20, offset, dst); offset += 8;
  std(R21, offset, dst); offset += 8;
  std(R22, offset, dst); offset += 8;
  std(R23, offset, dst); offset += 8;
  std(R24, offset, dst); offset += 8;
  std(R25, offset, dst); offset += 8;
  std(R26, offset, dst); offset += 8;
  std(R27, offset, dst); offset += 8;
  std(R28, offset, dst); offset += 8;
  std(R29, offset, dst); offset += 8;
  std(R30, offset, dst); offset += 8;
  std(R31, offset, dst); offset += 8;

  stfd(F14, offset, dst); offset += 8;
  stfd(F15, offset, dst); offset += 8;
  stfd(F16, offset, dst); offset += 8;
  stfd(F17, offset, dst); offset += 8;
  stfd(F18, offset, dst); offset += 8;
  stfd(F19, offset, dst); offset += 8;
  stfd(F20, offset, dst); offset += 8;
  stfd(F21, offset, dst); offset += 8;
  stfd(F22, offset, dst); offset += 8;
  stfd(F23, offset, dst); offset += 8;
  stfd(F24, offset, dst); offset += 8;
  stfd(F25, offset, dst); offset += 8;
  stfd(F26, offset, dst); offset += 8;
  stfd(F27, offset, dst); offset += 8;
  stfd(F28, offset, dst); offset += 8;
  stfd(F29, offset, dst); offset += 8;
  stfd(F30, offset, dst); offset += 8;
  stfd(F31, offset, dst);
}

// Uses ordering which corresponds to ABI:
//    _restgpr0_14: ld r14,-144(r1)
//    _restgpr0_15: ld r15,-136(r1)
//    _restgpr0_16: ld r16,-128(r1)
void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
  ld(R14, offset, src); offset += 8;
  ld(R15, offset, src); offset += 8;
  ld(R16, offset, src); offset += 8;
  ld(R17, offset, src); offset += 8;
  ld(R18, offset, src); offset += 8;
  ld(R19, offset, src); offset += 8;
  ld(R20, offset, src); offset += 8;
  ld(R21, offset, src); offset += 8;
  ld(R22, offset, src); offset += 8;
  ld(R23, offset, src); offset += 8;
  ld(R24, offset, src); offset += 8;
  ld(R25, offset, src); offset += 8;
  ld(R26, offset, src); offset += 8;
  ld(R27, offset, src); offset += 8;
  ld(R28, offset, src); offset += 8;
  ld(R29, offset, src); offset += 8;
  ld(R30, offset, src); offset += 8;
  ld(R31, offset, src); offset += 8;

  // FP registers
  lfd(F14, offset, src); offset += 8;
  lfd(F15, offset, src); offset += 8;
  lfd(F16, offset, src); offset += 8;
  lfd(F17, offset, src); offset += 8;
  lfd(F18, offset, src); offset += 8;
  lfd(F19, offset, src); offset += 8;
  lfd(F20, offset, src); offset += 8;
  lfd(F21, offset, src); offset += 8;
  lfd(F22, offset, src); offset += 8;
  lfd(F23, offset, src); offset += 8;
  lfd(F24, offset, src); offset += 8;
  lfd(F25, offset, src); offset += 8;
  lfd(F26, offset, src); offset += 8;
  lfd(F27, offset, src); offset += 8;
  lfd(F28, offset, src); offset += 8;
  lfd(F29, offset, src); offset += 8;
  lfd(F30, offset, src); offset += 8;
  lfd(F31, offset, src);
}

// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
  std(R2, offset, dst); offset += 8;
  std(R3, offset, dst); offset += 8;
  std(R4, offset, dst); offset += 8;
  std(R5, offset, dst); offset += 8;
  std(R6, offset, dst); offset += 8;
  std(R7, offset, dst); offset += 8;
  std(R8, offset, dst); offset += 8;
  std(R9, offset, dst); offset += 8;
  std(R10, offset, dst); offset += 8;
  std(R11, offset, dst); offset += 8;
  std(R12, offset, dst);
}

// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
  ld(R2, offset, src); offset += 8;
  ld(R3, offset, src); offset += 8;
  ld(R4, offset, src); offset += 8;
  ld(R5, offset, src); offset += 8;
  ld(R6, offset, src); offset += 8;
  ld(R7, offset, src); offset += 8;
  ld(R8, offset, src); offset += 8;
  ld(R9, offset, src); offset += 8;
  ld(R10, offset, src); offset += 8;
  ld(R11, offset, src); offset += 8;
  ld(R12, offset, src);
}

void MacroAssembler::save_LR_CR(Register tmp) {
  mfcr(tmp);
  std(tmp, _abi(cr), R1_SP);
  mflr(tmp);
  std(tmp, _abi(lr), R1_SP);
  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
}

void MacroAssembler::restore_LR_CR(Register tmp) {
  assert(tmp != R1_SP, "must be distinct");
  ld(tmp, _abi(lr), R1_SP);
  mtlr(tmp);
  ld(tmp, _abi(cr), R1_SP);
  mtcr(tmp);
}

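// Obtain the current PC by branching to the next instruction and reading LR.
// The address of the following mflr is returned and also placed in result;
// LR is clobbered.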
address MacroAssembler::get_PC_trash_LR(Register result) {
  Label L;
  bl(L);
  bind(L);
  address lr_pc = pc();
  mflr(result);
  return lr_pc;
}

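// Resize the current frame by offset bytes (positive or negative), keeping the
// back link to the caller's SP intact at the new top of stack.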
void MacroAssembler::resize_frame(Register offset, Register tmp) {
#ifdef ASSERT
  assert_different_registers(offset, tmp, R1_SP);
  andi_(tmp, offset, frame::alignment_in_bytes-1);
  asm_assert_eq("resize_frame: unaligned", 0x204);
#endif

  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdux(tmp, R1_SP, offset);
}

void MacroAssembler::resize_frame(int offset, Register tmp) {
  assert(is_simm(offset, 16), "too big an offset");
  assert_different_registers(tmp, R1_SP);
  assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned");
  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdu(tmp, offset, R1_SP);
}

void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
  // (addr == tmp1) || (addr == tmp2) is allowed here!
  assert(tmp1 != tmp2, "must be distinct");

  // compute offset w.r.t. current stack pointer
  // tmp_1 <- addr - SP (!)
  subf(tmp1, R1_SP, addr);

  // atomically update SP keeping back link.
  resize_frame(tmp1/* offset */, tmp2/* tmp */);
}

void MacroAssembler::push_frame(Register bytes, Register tmp) {
#ifdef ASSERT
  assert(bytes != R0, "r0 not allowed here");
  andi_(R0, bytes, frame::alignment_in_bytes-1);
  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
#endif
  neg(tmp, bytes);
  stdux(R1_SP, R1_SP, tmp);
}

// Push a frame of size `bytes'.
void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
  long offset = align_addr(bytes, frame::alignment_in_bytes);
  if (is_simm(-offset, 16)) {
    stdu(R1_SP, -offset, R1_SP);
  } else {
    load_const(tmp, -offset);
    stdux(R1_SP, R1_SP, tmp);
  }
}

// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size, tmp);
}

// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
                                                      Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  ld(R1_SP, _abi(callers_sp), R1_SP);
}

#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
  // TODO(asmundak): make sure the caller uses R12 as function descriptor
  // most of the time.
  if (R12 != r_function_entry) {
    mr(R12, r_function_entry);
  }
  mtctr(R12);
  // Do a call or a branch.
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/true);
}

// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/false);
}

address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
  load_const(R12, function_entry, R0);
  return branch_to(R12, /*and_link=*/true);
}

#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
  // we emit standard ptrgl glue code here
  assert((function_descriptor != R0), "function_descriptor cannot be R0");

  // retrieve necessary entries from the function descriptor
  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
  mtctr(R0);

  if (load_toc_of_callee) {
    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
  }
  if (load_env_of_callee) {
    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
  } else if (load_toc_of_callee) {
    li(R11, 0);
  }

  // do a call or a branch
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C calling
// conventions.
// We don't use the TOC in generated code, so there is no need to save
// and restore its value.
address MacroAssembler::call_c(Register fd) {
  return branch_to(fd, /*and_link=*/true,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c_and_return_to_caller(Register fd) {
  return branch_to(fd, /*and_link=*/false,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
  if (rt != relocInfo::none) {
    // this call needs to be relocatable
    if (!ReoptimizeCallSequences
        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
        || fd == NULL // support code-size estimation
        || !fd->is_friend_function()
        || fd->entry() == NULL) {
      // it's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);

      bool has_env = (fd != NULL && fd->env() != NULL);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/has_env);
    } else {
      // It's a friend function. Load the entry point and don't care about
      // toc and env. Use an optimizable call instruction, but ensure the
      // same code-size as in the case of a non-friend function.
      nop();
      nop();
      nop();
      bl64_patchable(fd->entry(), rt);
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  } else {
    // This call does not need to be relocatable, do more aggressive
    // optimizations.
    if (!ReoptimizeCallSequences
        || !fd->is_friend_function()) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/true);
    } else {
      // it's a friend function, load the entry point and don't care about
      // toc and env.
      address dest = fd->entry();
      if (is_within_range_of_b(dest, pc())) {
        bl(dest);
      } else {
        bl64_patchable(dest, rt);
      }
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  }
}

// Call a C function. All constants needed reside in TOC.
//
// Read the address to call from the TOC.
// Read env from TOC, if fd specifies an env.
// Read new TOC from TOC.
address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
                                         relocInfo::relocType rt, Register toc) {
  if (!ReoptimizeCallSequences
      || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
      || !fd->is_friend_function()) {
    // It's not a friend function as defined by class FunctionDescriptor,
    // so do a full call-c here.
    assert(fd->entry() != NULL, "function must be linked");

    AddressLiteral fd_entry(fd->entry());
    load_const_from_method_toc(R11, fd_entry, toc);
    mtctr(R11);
    if (fd->env() == NULL) {
      li(R11, 0);
      nop();
    } else {
      AddressLiteral fd_env(fd->env());
      load_const_from_method_toc(R11, fd_env, toc);
    }
    AddressLiteral fd_toc(fd->toc());
    load_toc_from_toc(R2_TOC, fd_toc, toc);
    // R2_TOC is killed.
    bctrl();
    _last_calls_return_pc = pc();
  } else {
    // It's a friend function, load the entry point and don't care about
    // toc and env. Use an optimizable call instruction, but ensure the
    // same code-size as in the case of a non-friend function.
    nop();
    bl64_patchable(fd->entry(), rt);
    _last_calls_return_pc = pc();
  }
  return _last_calls_return_pc;
}
#endif // ABI_ELFv2

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address entry_point,
                                  bool check_exceptions) {
  BLOCK_COMMENT("call_VM {");
  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = R1_SP;
  }
  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  address return_pc = call_c(entry_point, relocInfo::none);
#else
  address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
#endif

  reset_last_Java_frame();

  // Check for pending exceptions.
  if (check_exceptions) {
    // We don't check for exceptions here.
    ShouldNotReachHere();
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc;
  BLOCK_COMMENT("} call_VM");
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  BLOCK_COMMENT("call_VM_leaf {");
#if defined(ABI_ELFv2)
  call_c(entry_point, relocInfo::none);
#else
  call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
#endif
  BLOCK_COMMENT("} call_VM_leaf");
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  call_VM_base(oop_result, noreg, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  mr_if_needed(R6_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM_leaf(address entry_point) {
  call_VM_leaf_base(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  mr_if_needed(R3_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_3);
  call_VM_leaf(entry_point);
}

// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
                                               address* polling_address_ptr) {
  if (!is_ld(instruction))
    return false; // It's not a ld. Fail.

  int rt = inv_rt_field(instruction);
  int ra = inv_ra_field(instruction);
  int ds = inv_ds_field(instruction);
  if (!(ds == 0 && ra != 0 && rt == 0)) {
    return false; // It's not a ld(r0, X, ra). Fail.
  }

  if (!ucontext) {
    // Set polling address.
    if (polling_address_ptr != NULL) {
      *polling_address_ptr = NULL;
    }
    return true; // No ucontext given. Can't check value of ra. Assume true.
  }

#ifdef LINUX
  // Ucontext given. Check that register ra contains the address of
  // the safepoint polling page.
  ucontext_t* uc = (ucontext_t*) ucontext;
  // Set polling address.
  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
  if (polling_address_ptr != NULL) {
    *polling_address_ptr = addr;
  }
  return os::is_poll_address(addr);
#else
  // Not on Linux, ucontext must be NULL.
  ShouldNotReachHere();
  return false;
#endif
}

bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;

  if (is_stwx(instruction) || is_stwux(instruction)) {
    int ra = inv_ra_field(instruction);
    int rb = inv_rb_field(instruction);

    // look up content of ra and rb in ucontext
    address ra_val=(address)uc->uc_mcontext.regs->gpr[ra];
    long rb_val=(long)uc->uc_mcontext.regs->gpr[rb];
    return os::is_memory_serialize_page(thread, ra_val+rb_val);
  } else if (is_stw(instruction) || is_stwu(instruction)) {
    int ra = inv_ra_field(instruction);
    int d1 = inv_d1_field(instruction);

    // look up content of ra in ucontext
    address ra_val=(address)uc->uc_mcontext.regs->gpr[ra];
    return os::is_memory_serialize_page(thread, ra_val+d1);
  } else {
    return false;
  }
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return false;
#endif
}

void MacroAssembler::bang_stack_with_offset(int offset) {
  // When increasing the stack, the old stack pointer will be written
  // to the new top of stack according to the PPC64 abi.
  // Therefore, stack banging is not necessary when increasing
  // the stack by <= os::vm_page_size() bytes.
  // When increasing the stack by a larger amount, this method is
  // called repeatedly to bang the intermediate pages.

  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");

  long stdoffset = -offset;

  if (is_simm(stdoffset, 16)) {
    // Signed 16 bit offset, a simple std is ok.
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, (int)(signed short)stdoffset, R1_SP);
    } else {
      std(R0,(int)(signed short)stdoffset, R1_SP);
    }
  } else if (is_simm(stdoffset, 31)) {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);

    Register tmp = R11;
    addis(tmp, R1_SP, hi);
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, lo, tmp);
    } else {
      std(R0, lo, tmp);
    }
  } else {
    ShouldNotReachHere();
  }
}

// If instruction is a stack bang of the form
//    std   R0,    x(Ry),    (see bang_stack_with_offset())
//    stdu  R1_SP, x(R1_SP), (see push_frame(), resize_frame())
// or stdux R1_SP, Rx, R1_SP (see push_frame(), resize_frame())
// return the banged address. Otherwise, return 0.
address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;
  int rs = inv_rs_field(instruction);
  int ra = inv_ra_field(instruction);
  if (   (is_ld(instruction) && rs == 0 && UseLoadInstructionsForStackBangingPPC64)
      || (is_std(instruction) && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
      || (is_stdu(instruction) && rs == 1)) {
    int ds = inv_ds_field(instruction);
    // return banged address
    return ds+(address)uc->uc_mcontext.regs->gpr[ra];
  } else if (is_stdux(instruction) && rs == 1) {
    int rb = inv_rb_field(instruction);
    address sp = (address)uc->uc_mcontext.regs->gpr[1];
    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
                                  : sp + rb_val; // banged address
  }
  return NULL; // not a stack bang
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return NULL;
#endif
}

// CmpxchgX sets condition register to cmpX(current, compare).
void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
                              Register compare_value, Register exchange_value,
                              Register addr_base, int semantics, bool cmpxchgx_hint,
                              Register int_flag_success, bool contention_hint) {
  Label retry;
  Label failed;
  Label done;

  // Save one branch if result is returned via register and
  // result register is different from the other ones.
  bool use_result_reg = (int_flag_success != noreg);
  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
                            int_flag_success != exchange_value && int_flag_success != addr_base);

  // release/fence semantics
  if (semantics & MemBarRel) {
    release();
  }

  if (use_result_reg && preset_result_reg) {
    li(int_flag_success, 0); // preset (assume cas failed)
  }

  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  if (contention_hint) { // Don't try to reserve if cmp fails.
    lwz(dest_current_value, 0, addr_base);
    cmpw(flag, dest_current_value, compare_value);
    bne(flag, failed);
  }

  // atomic emulation loop
  bind(retry);

  lwarx(dest_current_value, addr_base, cmpxchgx_hint);
  cmpw(flag, dest_current_value, compare_value);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(flag, failed);
  } else {
    bne(                  flag, failed);
  }
  // branch to done => (flag == ne), (dest_current_value != compare_value)
  // fall through   => (flag == eq), (dest_current_value == compare_value)

  stwcx_(exchange_value, addr_base);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  } else {
    bne(                  CCR0, retry); // StXcx_ sets CCR0.
  }
  // fall through => (flag == eq), (dest_current_value == compare_value), (swapped)

  // Result in register (must do this at the end because int_flag_success can be the
  // same register as one above).
  if (use_result_reg) {
    li(int_flag_success, 1);
  }

  if (semantics & MemBarFenceAfter) {
    fence();
  } else if (semantics & MemBarAcq) {
    isync();
  }

  if (use_result_reg && !preset_result_reg) {
    b(done);
  }

  bind(failed);
  if (use_result_reg && !preset_result_reg) {
    li(int_flag_success, 0);
  }

  bind(done);
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
}

// Performs atomic compare exchange:
//   if (compare_value == *addr_base)
//     *addr_base = exchange_value
//     int_flag_success = 1;
//   else
//     int_flag_success = 0;
//
// ConditionRegister flag      = cmp(compare_value, *addr_base)
// Register dest_current_value = *addr_base
// Register compare_value      Used to compare with value in memory
// Register exchange_value     Written to memory if compare_value == *addr_base
// Register addr_base          The memory location to compareXChange
// Register int_flag_success   Set to 1 if exchange_value was written to *addr_base
//
// To avoid the costly compare exchange the value is tested beforehand.
// Several special cases exist to avoid generating unnecessary code.
//
void MacroAssembler::cmpxchgd(ConditionRegister flag,
                              Register dest_current_value, Register compare_value, Register exchange_value,
                              Register addr_base, int semantics, bool cmpxchgx_hint,
                              Register int_flag_success, Label* failed_ext, bool contention_hint) {
  Label retry;
  Label failed_int;
  Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
  Label done;

  // Save one branch if result is returned via register and result register is different from the other ones.
  bool use_result_reg = (int_flag_success!=noreg);
  bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value &&
                            int_flag_success!=exchange_value && int_flag_success!=addr_base);
  assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");

  // release/fence semantics
  if (semantics & MemBarRel) {
    release();
  }

  if (use_result_reg && preset_result_reg) {
    li(int_flag_success, 0); // preset (assume cas failed)
  }

  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  if (contention_hint) { // Don't try to reserve if cmp fails.
    ld(dest_current_value, 0, addr_base);
    cmpd(flag, dest_current_value, compare_value);
    bne(flag, failed);
  }

  // atomic emulation loop
  bind(retry);

  ldarx(dest_current_value, addr_base, cmpxchgx_hint);
  cmpd(flag, dest_current_value, compare_value);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(flag, failed);
  } else {
    bne(                  flag, failed);
  }

  stdcx_(exchange_value, addr_base);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
  } else {
    bne(                  CCR0, retry); // stXcx_ sets CCR0
  }

  // result in register (must do this at the end because int_flag_success can be the same register as one above)
  if (use_result_reg) {
    li(int_flag_success, 1);
  }

  // POWER6 doesn't need isync in CAS.
  // Always emit isync to be on the safe side.
  if (semantics & MemBarFenceAfter) {
    fence();
  } else if (semantics & MemBarAcq) {
    isync();
  }

  if (use_result_reg && !preset_result_reg) {
    b(done);
  }

  bind(failed_int);
  if (use_result_reg && !preset_result_reg) {
    li(int_flag_success, 0);
  }

  bind(done);
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
}

// Look up the method for a megamorphic invokeinterface call.
1526
// The target method is determined by <intf_klass, itable_index>.
1527
// The receiver klass is in recv_klass.
1528
// On success, the result will be in method_result, and execution falls through.
1529
// On failure, execution transfers to the given label.
1530
void MacroAssembler::lookup_interface_method(Register recv_klass,
1531
Register intf_klass,
1532
RegisterOrConstant itable_index,
1533
Register method_result,
1534
Register scan_temp,
1535
Register temp2,
1536
Label& L_no_such_interface,
1537
bool return_method) {
1538
assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
1539
1540
// Compute start of first itableOffsetEntry (which is at the end of the vtable).
1541
int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
1542
int itentry_off = itableMethodEntry::method_offset_in_bytes();
1543
int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
1544
int scan_step = itableOffsetEntry::size() * wordSize;
1545
int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
1546
1547
lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
1548
// %%% We should store the aligned, prescaled offset in the klassoop.
1549
// Then the next several instructions would fold away.
1550
1551
sldi(scan_temp, scan_temp, log_vte_size);
1552
addi(scan_temp, scan_temp, vtable_base);
1553
add(scan_temp, recv_klass, scan_temp);
1554
1555
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
1556
if (return_method) {
1557
if (itable_index.is_register()) {
1558
Register itable_offset = itable_index.as_register();
1559
sldi(method_result, itable_offset, logMEsize);
1560
if (itentry_off) { addi(method_result, method_result, itentry_off); }
1561
add(method_result, method_result, recv_klass);
1562
} else {
1563
long itable_offset = (long)itable_index.as_constant();
1564
// static address, no relocation
1565
load_const_optimized(temp2, (itable_offset << logMEsize) + itentry_off);
1566
add(method_result, temp2, recv_klass);
1567
}
1568
}
1569
1570
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
1571
// if (scan->interface() == intf) {
1572
// result = (klass + scan->offset() + itable_index);
1573
// }
1574
// }
1575
Label search, found_method;
1576
1577
for (int peel = 1; peel >= 0; peel--) {
1578
// %%%% Could load both offset and interface in one ldx, if they were
1579
// in the opposite order. This would save a load.
1580
ld(temp2, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
1581
1582
// Check that this entry is non-null. A null entry means that
1583
// the receiver class doesn't implement the interface, and wasn't the
1584
// same as when the caller was compiled.
1585
cmpd(CCR0, temp2, intf_klass);
1586
1587
if (peel) {
1588
beq(CCR0, found_method);
1589
} else {
1590
bne(CCR0, search);
1591
// (invert the test to fall through to found_method...)
1592
}
1593
1594
if (!peel) break;
1595
1596
bind(search);
1597
1598
cmpdi(CCR0, temp2, 0);
1599
beq(CCR0, L_no_such_interface);
1600
addi(scan_temp, scan_temp, scan_step);
1601
}
1602
1603
bind(found_method);
1604
1605
// Got a hit.
1606
if (return_method) {
1607
int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
1608
lwz(scan_temp, ito_offset, scan_temp);
1609
ldx(method_result, scan_temp, method_result);
1610
}
1611
}
1612
1613
// virtual method calling
1614
void MacroAssembler::lookup_virtual_method(Register recv_klass,
1615
RegisterOrConstant vtable_index,
1616
Register method_result) {
1617
1618
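// Roughly: R19_method = *(recv_klass + vtable_start_offset*wordSize
//                         + vtable_index*wordSize + method_offset_in_bytes).
// (recv_klass is clobbered by the scaled index below.)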
assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
1619
1620
const int base = InstanceKlass::vtable_start_offset() * wordSize;
1621
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
1622
1623
if (vtable_index.is_register()) {
1624
sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
1625
add(recv_klass, vtable_index.as_register(), recv_klass);
1626
} else {
1627
addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
1628
}
1629
ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
1630
}
1631
1632
/////////////////////////////////////////// subtype checking ////////////////////////////////////////////
1633
1634
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
1635
Register super_klass,
1636
Register temp1_reg,
1637
Register temp2_reg,
1638
Label& L_success,
1639
Label& L_failure) {
1640
1641
const Register check_cache_offset = temp1_reg;
1642
const Register cached_super = temp2_reg;
1643
1644
assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
1645
1646
int sco_offset = in_bytes(Klass::super_check_offset_offset());
1647
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1648
1649
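// The fast path is roughly:
//
//   if (sub_klass == super_klass)                        return success;
//   sco = super_klass->super_check_offset;
//   if (*(Klass**)(sub_klass + sco) == super_klass)      return success;
//   if (sco != secondary_super_cache_offset)             return failure;
//   /* otherwise fall through to the slow path */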
// If the pointers are equal, we are done (e.g., String[] elements).
1650
// This self-check enables sharing of secondary supertype arrays among
1651
// non-primary types such as array-of-interface. Otherwise, each such
1652
// type would need its own customized SSA.
1653
// We move this check to the front of the fast path because many
1654
// type checks are in fact trivially successful in this manner,
1655
// so we get a nicely predicted branch right at the start of the check.
1656
cmpd(CCR0, sub_klass, super_klass);
1657
beq(CCR0, L_success);
1658
1659
// Check the supertype display:
1660
lwz(check_cache_offset, sco_offset, super_klass);
1661
// The loaded value is the offset from KlassOopDesc.
1662
1663
ldx(cached_super, check_cache_offset, sub_klass);
1664
cmpd(CCR0, cached_super, super_klass);
1665
beq(CCR0, L_success);
1666
1667
// This check has worked decisively for primary supers.
1668
// Secondary supers are sought in the super_cache ('super_cache_addr').
1669
// (Secondary supers are interfaces and very deeply nested subtypes.)
1670
// This works in the same check above because of a tricky aliasing
1671
// between the super_cache and the primary super display elements.
1672
// (The 'super_check_addr' can address either, as the case requires.)
1673
// Note that the cache is updated below if it does not help us find
1674
// what we need immediately.
1675
// So if it was a primary super, we can just fail immediately.
1676
// Otherwise, it's the slow path for us (no success at this point).
1677
1678
cmpwi(CCR0, check_cache_offset, sc_offset);
1679
bne(CCR0, L_failure);
1680
// bind(slow_path); // fallthru
1681
}
1682
1683
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1684
Register super_klass,
1685
Register temp1_reg,
1686
Register temp2_reg,
1687
Label* L_success,
1688
Register result_reg) {
1689
const Register array_ptr = temp1_reg; // current value from cache array
1690
const Register temp = temp2_reg;
1691
1692
assert_different_registers(sub_klass, super_klass, array_ptr, temp);
1693
1694
int source_offset = in_bytes(Klass::secondary_supers_offset());
1695
int target_offset = in_bytes(Klass::secondary_super_cache_offset());
1696
1697
int length_offset = Array<Klass*>::length_offset_in_bytes();
1698
int base_offset = Array<Klass*>::base_offset_in_bytes();
1699
1700
Label hit, loop, failure, fallthru;
1701
1702
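// The slow path scans the secondary supers array, roughly:
//
//   for (i = 0; i < sub_klass->secondary_supers->length; i++) {
//     if (sub_klass->secondary_supers[i] == super_klass) {
//       sub_klass->secondary_super_cache = super_klass;   // remember the hit
//       return hit;
//     }
//   }
//   return miss;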
ld(array_ptr, source_offset, sub_klass);
1703
1704
//assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
1705
lwz(temp, length_offset, array_ptr);
1706
cmpwi(CCR0, temp, 0);
1707
beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0
1708
1709
mtctr(temp); // load ctr
1710
1711
bind(loop);
1712
// Oops in the table are no longer compressed.
1713
ld(temp, base_offset, array_ptr);
1714
cmpd(CCR0, temp, super_klass);
1715
beq(CCR0, hit);
1716
addi(array_ptr, array_ptr, BytesPerWord);
1717
bdnz(loop);
1718
1719
bind(failure);
1720
if (result_reg!=noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
1721
b(fallthru);
1722
1723
bind(hit);
1724
std(super_klass, target_offset, sub_klass); // save result to cache
1725
if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
1726
if (L_success != NULL) b(*L_success);
1727
1728
bind(fallthru);
1729
}
1730
1731
// Try fast path, then go to slow one if not successful
1732
void MacroAssembler::check_klass_subtype(Register sub_klass,
1733
Register super_klass,
1734
Register temp1_reg,
1735
Register temp2_reg,
1736
Label& L_success) {
1737
Label L_failure;
1738
check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
1739
check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
1740
bind(L_failure); // Fallthru if not successful.
1741
}
1742
1743
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
1744
Register temp_reg,
1745
Label& wrong_method_type) {
1746
assert_different_registers(mtype_reg, mh_reg, temp_reg);
1747
// Compare method type against that of the receiver.
1748
load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
1749
cmpd(CCR0, temp_reg, mtype_reg);
1750
bne(CCR0, wrong_method_type);
1751
}
1752
1753
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
1754
Register temp_reg,
1755
int extra_slot_offset) {
1756
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
1757
int stackElementSize = Interpreter::stackElementSize;
1758
int offset = extra_slot_offset * stackElementSize;
1759
if (arg_slot.is_constant()) {
1760
offset += arg_slot.as_constant() * stackElementSize;
1761
return offset;
1762
} else {
1763
assert(temp_reg != noreg, "must specify");
1764
sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
1765
if (offset != 0)
1766
addi(temp_reg, temp_reg, offset);
1767
return temp_reg;
1768
}
1769
}
1770
1771
void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
1772
Register mark_reg, Register temp_reg,
1773
Register temp2_reg, Label& done, Label* slow_case) {
1774
assert(UseBiasedLocking, "why call this otherwise?");
1775
1776
#ifdef ASSERT
1777
assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
1778
#endif
1779
1780
Label cas_label;
1781
1782
// Branch to done if fast path fails and no slow_case provided.
1783
Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
1784
1785
// Biased locking
1786
// See whether the lock is currently biased toward our thread and
1787
// whether the epoch is still valid
1788
// Note that the runtime guarantees sufficient alignment of JavaThread
1789
// pointers to allow age to be placed into low bits
1790
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
1791
"biased locking makes assumptions about bit layout");
1792
1793
if (PrintBiasedLockingStatistics) {
1794
load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
1795
lwz(temp2_reg, 0, temp_reg);
1796
addi(temp2_reg, temp2_reg, 1);
1797
stw(temp2_reg, 0, temp_reg);
1798
}
1799
1800
andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
1801
cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
1802
bne(cr_reg, cas_label);
1803
1804
load_klass(temp_reg, obj_reg);
1805
1806
load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
1807
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
1808
orr(temp_reg, R16_thread, temp_reg);
1809
xorr(temp_reg, mark_reg, temp_reg);
1810
andr(temp_reg, temp_reg, temp2_reg);
1811
cmpdi(cr_reg, temp_reg, 0);
1812
if (PrintBiasedLockingStatistics) {
1813
Label l;
1814
bne(cr_reg, l);
1815
load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
1816
lwz(temp2_reg, 0, mark_reg);
1817
addi(temp2_reg, temp2_reg, 1);
1818
stw(temp2_reg, 0, mark_reg);
1819
// restore mark_reg
1820
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
1821
bind(l);
1822
}
1823
beq(cr_reg, done);
1824
1825
Label try_revoke_bias;
1826
Label try_rebias;
1827
1828
// At this point we know that the header has the bias pattern and
1829
// that we are not the bias owner in the current epoch. We need to
1830
// figure out more details about the state of the header in order to
1831
// know what operations can be legally performed on the object's
1832
// header.
1833
1834
// If the low three bits in the xor result aren't clear, that means
1835
// the prototype header is no longer biased and we have to revoke
1836
// the bias on this object.
1837
andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
1838
cmpwi(cr_reg, temp2_reg, 0);
1839
bne(cr_reg, try_revoke_bias);
1840
1841
// Biasing is still enabled for this data type. See whether the
1842
// epoch of the current bias is still valid, meaning that the epoch
1843
// bits of the mark word are equal to the epoch bits of the
1844
// prototype header. (Note that the prototype header's epoch bits
1845
// only change at a safepoint.) If not, attempt to rebias the object
1846
// toward the current thread. Note that we must be absolutely sure
1847
// that the current epoch is invalid in order to do this because
1848
// otherwise the manipulations it performs on the mark word are
1849
// illegal.
1850
1851
int shift_amount = 64 - markOopDesc::epoch_shift;
1852
// rotate epoch bits to right (little) end and set other bits to 0
1853
// [ big part | epoch | little part ] -> [ 0..0 | epoch ]
1854
rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
1855
// branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
1856
bne(CCR0, try_rebias);
1857
1858
// The epoch of the current bias is still valid but we know nothing
1859
// about the owner; it might be set or it might be clear. Try to
1860
// acquire the bias of the object using an atomic operation. If this
1861
// fails we will go in to the runtime to revoke the object's bias.
1862
// Note that we first construct the presumed unbiased header so we
1863
// don't accidentally blow away another thread's valid bias.
1864
andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
1865
markOopDesc::age_mask_in_place |
1866
markOopDesc::epoch_mask_in_place));
1867
orr(temp_reg, R16_thread, mark_reg);
1868
1869
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
1870
1871
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
1872
fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
1873
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
1874
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
1875
/*where=*/obj_reg,
1876
MacroAssembler::MemBarAcq,
1877
MacroAssembler::cmpxchgx_hint_acquire_lock(),
1878
noreg, slow_case_int); // bail out if failed
1879
1880
// If the biasing toward our thread failed, this means that
1881
// another thread succeeded in biasing it toward itself and we
1882
// need to revoke that bias. The revocation will occur in the
1883
// interpreter runtime in the slow case.
1884
if (PrintBiasedLockingStatistics) {
1885
load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
1886
lwz(temp2_reg, 0, temp_reg);
1887
addi(temp2_reg, temp2_reg, 1);
1888
stw(temp2_reg, 0, temp_reg);
1889
}
1890
b(done);
1891
1892
bind(try_rebias);
1893
// At this point we know the epoch has expired, meaning that the
1894
// current "bias owner", if any, is actually invalid. Under these
1895
// circumstances _only_, we are allowed to use the current header's
1896
// value as the comparison value when doing the cas to acquire the
1897
// bias in the current epoch. In other words, we allow transfer of
1898
// the bias from one thread to another directly in this situation.
1899
andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
1900
orr(temp_reg, R16_thread, temp_reg);
1901
load_klass(temp2_reg, obj_reg);
1902
ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
1903
orr(temp_reg, temp_reg, temp2_reg);
1904
1905
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
1906
1907
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
1908
fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
1909
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
1910
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
1911
/*where=*/obj_reg,
1912
MacroAssembler::MemBarAcq,
1913
MacroAssembler::cmpxchgx_hint_acquire_lock(),
1914
noreg, slow_case_int); // bail out if failed
1915
1916
// If the biasing toward our thread failed, this means that
1917
// another thread succeeded in biasing it toward itself and we
1918
// need to revoke that bias. The revocation will occur in the
1919
// interpreter runtime in the slow case.
1920
if (PrintBiasedLockingStatistics) {
1921
load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
1922
lwz(temp2_reg, 0, temp_reg);
1923
addi(temp2_reg, temp2_reg, 1);
1924
stw(temp2_reg, 0, temp_reg);
1925
}
1926
b(done);
1927
1928
bind(try_revoke_bias);
1929
// The prototype mark in the klass doesn't have the bias bit set any
1930
// more, indicating that objects of this data type are not supposed
1931
// to be biased any more. We are going to try to reset the mark of
1932
// this object to the prototype value and fall through to the
1933
// CAS-based locking scheme. Note that if our CAS fails, it means
1934
// that another thread raced us for the privilege of revoking the
1935
// bias of this particular object, so it's okay to continue in the
1936
// normal locking code.
1937
load_klass(temp_reg, obj_reg);
1938
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
1939
andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
1940
orr(temp_reg, temp_reg, temp2_reg);
1941
1942
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
1943
1944
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
1945
fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
1946
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
1947
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
1948
/*where=*/obj_reg,
1949
MacroAssembler::MemBarAcq,
1950
MacroAssembler::cmpxchgx_hint_acquire_lock());
1951
1952
// reload markOop in mark_reg before continuing with lightweight locking
1953
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
1954
1955
// Fall through to the normal CAS-based lock, because no matter what
1956
// the result of the above CAS, some thread must have succeeded in
1957
// removing the bias bit from the object's header.
1958
if (PrintBiasedLockingStatistics) {
1959
Label l;
1960
bne(cr_reg, l);
1961
load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
1962
lwz(temp2_reg, 0, temp_reg);
1963
addi(temp2_reg, temp2_reg, 1);
1964
stw(temp2_reg, 0, temp_reg);
1965
bind(l);
1966
}
1967
1968
bind(cas_label);
1969
}
1970
1971
void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
1972
// Check for biased locking unlock case, which is a no-op
1973
// Note: we do not have to check the thread ID for two reasons.
1974
// First, the interpreter checks for IllegalMonitorStateException at
1975
// a higher level. Second, if the bias was revoked while we held the
1976
// lock, the object could not be rebiased toward another thread, so
1977
// the bias bit would be clear.
1978
1979
ld(temp_reg, 0, mark_addr);
1980
andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
1981
1982
cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
1983
beq(cr_reg, done);
1984
}
1985
1986
// "The box" is the space on the stack where we copy the object mark.
1987
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
1988
Register temp, Register displaced_header, Register current_header) {
1989
assert_different_registers(oop, box, temp, displaced_header, current_header);
1990
assert(flag != CCR0, "bad condition register");
1991
Label cont;
1992
Label object_has_monitor;
1993
Label cas_failed;
1994
1995
// Load markOop from object into displaced_header.
1996
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
1997
1998
1999
// Always do locking in runtime.
2000
if (EmitSync & 0x01) {
2001
cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
2002
return;
2003
}
2004
2005
if (UseBiasedLocking) {
2006
biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
2007
}
2008
2009
// Handle existing monitor.
2010
if ((EmitSync & 0x02) == 0) {
2011
// The object has an existing monitor iff (mark & monitor_value) != 0.
2012
andi_(temp, displaced_header, markOopDesc::monitor_value);
2013
bne(CCR0, object_has_monitor);
2014
}
2015
2016
// Set displaced_header to be (markOop of object | UNLOCK_VALUE).
2017
ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
2018
2019
// Load Compare Value application register.
2020
2021
// Initialize the box. (Must happen before we update the object mark!)
2022
std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
2023
2024
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
2025
// Compare object markOop with mark and if equal exchange scratch1 with object markOop.
2026
// CmpxchgX sets cr_reg to cmpX(current, displaced).
2027
membar(Assembler::StoreStore);
2028
cmpxchgd(/*flag=*/flag,
2029
/*current_value=*/current_header,
2030
/*compare_value=*/displaced_header,
2031
/*exchange_value=*/box,
2032
/*where=*/oop,
2033
MacroAssembler::MemBarAcq,
2034
MacroAssembler::cmpxchgx_hint_acquire_lock(),
2035
noreg,
2036
&cas_failed);
2037
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
2038
2039
// If the compare-and-exchange succeeded, then we found an unlocked
2040
// object and we have now locked it.
2041
b(cont);
2042
2043
bind(cas_failed);
2044
// We did not see an unlocked object so try the fast recursive case.
2045
2046
// Check if the owner is self by comparing the value in the markOop of object
2047
// (current_header) with the stack pointer.
2048
sub(current_header, current_header, R1_SP);
2049
load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
2050
2051
and_(R0/*==0?*/, current_header, temp);
2052
// If the condition is true we are in the cont (success) case and hence we can store 0 as the
2053
// displaced header in the box, which indicates that it is a recursive lock.
2054
mcrf(flag,CCR0);
2055
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
2056
2057
// Handle existing monitor.
2058
if ((EmitSync & 0x02) == 0) {
2059
b(cont);
2060
2061
bind(object_has_monitor);
2062
// The object's monitor m is unlocked iff m->owner == NULL,
2063
// otherwise m->owner may contain a thread or a stack address.
2064
//
2065
// Try to CAS m->owner from NULL to current thread.
2066
addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
2067
li(displaced_header, 0);
2068
// CmpxchgX sets flag to cmpX(current, displaced).
2069
cmpxchgd(/*flag=*/flag,
2070
/*current_value=*/current_header,
2071
/*compare_value=*/displaced_header,
2072
/*exchange_value=*/R16_thread,
2073
/*where=*/temp,
2074
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
2075
MacroAssembler::cmpxchgx_hint_acquire_lock());
2076
2077
// Store a non-null value into the box.
2078
std(box, BasicLock::displaced_header_offset_in_bytes(), box);
2079
2080
# ifdef ASSERT
2081
bne(flag, cont);
2082
// We have acquired the monitor, check some invariants.
2083
addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
2084
// Invariant 1: _recursions should be 0.
2085
//assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
2086
asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
2087
"monitor->_recursions should be 0", -1);
2088
// Invariant 2: OwnerIsThread shouldn't be 0.
2089
//assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
2090
//asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
2091
// "monitor->OwnerIsThread shouldn't be 0", -1);
2092
# endif
2093
}
2094
2095
bind(cont);
2096
// flag == EQ indicates success
2097
// flag == NE indicates failure
2098
}
2099
2100
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
2101
Register temp, Register displaced_header, Register current_header) {
2102
assert_different_registers(oop, box, temp, displaced_header, current_header);
2103
assert(flag != CCR0, "bad condition register");
2104
Label cont;
2105
Label object_has_monitor;
2106
2107
// Always do locking in runtime.
2108
if (EmitSync & 0x01) {
2109
cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
2110
return;
2111
}
2112
2113
if (UseBiasedLocking) {
2114
biased_locking_exit(flag, oop, current_header, cont);
2115
}
2116
2117
// Find the lock address and load the displaced header from the stack.
2118
ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
2119
2120
// If the displaced header is 0, we have a recursive unlock.
2121
cmpdi(flag, displaced_header, 0);
2122
beq(flag, cont);
2123
2124
// Handle existing monitor.
2125
if ((EmitSync & 0x02) == 0) {
2126
// The object has an existing monitor iff (mark & monitor_value) != 0.
2127
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
2128
andi(temp, current_header, markOopDesc::monitor_value);
2129
cmpdi(flag, temp, 0);
2130
bne(flag, object_has_monitor);
2131
}
2132
2133
2134
// Check if it is still a lightweight lock; this is true if we see
2135
// the stack address of the basicLock in the markOop of the object.
2136
// Cmpxchg sets flag to cmpd(current_header, box).
2137
cmpxchgd(/*flag=*/flag,
2138
/*current_value=*/current_header,
2139
/*compare_value=*/box,
2140
/*exchange_value=*/displaced_header,
2141
/*where=*/oop,
2142
MacroAssembler::MemBarRel,
2143
MacroAssembler::cmpxchgx_hint_release_lock(),
2144
noreg,
2145
&cont);
2146
2147
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
2148
2149
// Handle existing monitor.
2150
if ((EmitSync & 0x02) == 0) {
2151
b(cont);
2152
2153
bind(object_has_monitor);
2154
addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
2155
ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
2156
ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
2157
xorr(temp, R16_thread, temp); // Will be 0 if we are the owner.
2158
orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
2159
cmpdi(flag, temp, 0);
2160
bne(flag, cont);
2161
2162
ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header);
2163
ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
2164
orr(temp, temp, displaced_header); // Will be 0 if both are 0.
2165
cmpdi(flag, temp, 0);
2166
bne(flag, cont);
2167
release();
2168
std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
2169
}
2170
2171
bind(cont);
2172
// flag == EQ indicates success
2173
// flag == NE indicates failure
2174
}
2175
2176
// Write serialization page so VM thread can do a pseudo remote membar.
2177
// We use the current thread pointer to calculate a thread specific
2178
// offset to write to within the page. This minimizes bus traffic
2179
// due to cache line collision.
2180
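// Roughly: the per-thread offset is (thread >> page_shift) & (page_size - sizeof(int)),
// and a word (its value is irrelevant, only the store matters) is written to
// serialize_page + offset after a release barrier.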
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2181
srdi(tmp2, thread, os::get_serialize_page_shift_count());
2182
2183
int mask = os::vm_page_size() - sizeof(int);
2184
if (Assembler::is_simm(mask, 16)) {
2185
andi(tmp2, tmp2, mask);
2186
} else {
2187
lis(tmp1, (int)((signed short) (mask >> 16)));
2188
ori(tmp1, tmp1, mask & 0x0000ffff);
2189
andr(tmp2, tmp2, tmp1);
2190
}
2191
2192
load_const(tmp1, (long) os::get_memory_serialize_page());
2193
release();
2194
stwx(R0, tmp1, tmp2);
2195
}
2196
2197
2198
// GC barrier helper macros
2199
2200
// Write the card table byte if needed.
2201
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
2202
CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
2203
assert(bs->kind() == BarrierSet::CardTableModRef ||
2204
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
2205
#ifdef ASSERT
2206
cmpdi(CCR0, Rnew_val, 0);
2207
asm_assert_ne("null oop not allowed", 0x321);
2208
#endif
2209
card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
2210
}
2211
2212
// Write the card table byte.
2213
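// Roughly: byte_map_base[Robj >> card_shift] = 0 /* dirty */;
// a StoreStore barrier precedes the store when CMS is used. (Robj is clobbered by the shift.)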
void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
2214
assert_different_registers(Robj, Rtmp, R0);
2215
load_const_optimized(Rtmp, (address)byte_map_base, R0);
2216
srdi(Robj, Robj, CardTableModRefBS::card_shift);
2217
li(R0, 0); // dirty
2218
if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
2219
stbx(R0, Rtmp, Robj);
2220
}
2221
2222
// Kills R31 if value is a volatile register.
2223
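// Roughly:
//   if (value == NULL) return NULL;
//   oop obj = *(value & ~weak_tag_mask);                       // strip tag, then dereference
//   if (UseG1GC && (value & weak_tag_mask)) g1_write_barrier_pre(obj);  // keep weak referent alive
//   return obj;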
void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2, bool needs_frame) {
2224
Label done;
2225
cmpdi(CCR0, value, 0);
2226
beq(CCR0, done); // Use NULL as-is.
2227
2228
clrrdi(tmp1, value, JNIHandles::weak_tag_size);
2229
#if INCLUDE_ALL_GCS
2230
if (UseG1GC) { andi_(tmp2, value, JNIHandles::weak_tag_mask); }
2231
#endif
2232
ld(value, 0, tmp1); // Resolve (untagged) jobject.
2233
2234
#if INCLUDE_ALL_GCS
2235
if (UseG1GC) {
2236
Label not_weak;
2237
beq(CCR0, not_weak); // Test for jweak tag.
2238
verify_oop(value);
2239
g1_write_barrier_pre(noreg, // obj
2240
noreg, // offset
2241
value, // pre_val
2242
tmp1, tmp2, needs_frame);
2243
bind(not_weak);
2244
}
2245
#endif // INCLUDE_ALL_GCS
2246
verify_oop(value);
2247
bind(done);
2248
}
2249
2250
#if INCLUDE_ALL_GCS
2251
// General G1 pre-barrier generator.
2252
// Goal: record the previous value if it is not null.
2253
void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
2254
Register Rtmp1, Register Rtmp2, bool needs_frame) {
2255
Label runtime, filtered;
2256
2257
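// SATB pre-barrier, roughly:
//
//   if (!thread->satb_queue.active)  return;                   // marking not active
//   if (pre_val == NULL)             return;                   // nothing to record
//   if (thread->satb_queue.index == 0)
//     call g1_wb_pre(pre_val, thread);                         // buffer full -> runtime
//   else {
//     index -= wordSize;                                       // index is a byte offset
//     *(oop*)(thread->satb_queue.buf + index) = pre_val;
//   }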
// Is marking active?
2258
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
2259
lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
2260
} else {
2261
guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
2262
lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
2263
}
2264
cmpdi(CCR0, Rtmp1, 0);
2265
beq(CCR0, filtered);
2266
2267
// Do we need to load the previous value?
2268
if (Robj != noreg) {
2269
// Load the previous value...
2270
if (UseCompressedOops) {
2271
lwz(Rpre_val, offset, Robj);
2272
} else {
2273
ld(Rpre_val, offset, Robj);
2274
}
2275
// Previous value has been loaded into Rpre_val.
2276
}
2277
assert(Rpre_val != noreg, "must have a real register");
2278
2279
// Is the previous value null?
2280
cmpdi(CCR0, Rpre_val, 0);
2281
beq(CCR0, filtered);
2282
2283
if (Robj != noreg && UseCompressedOops) {
2284
decode_heap_oop_not_null(Rpre_val);
2285
}
2286
2287
// OK, it's not filtered, so we'll need to call enqueue. In the normal
2288
// case, pre_val will be a scratch G-reg, but there are some cases in
2289
// which it's an O-reg. In the first case, do a normal call. In the
2290
// latter, do a save here and call the frameless version.
2291
2292
// Can we store original value in the thread's buffer?
2293
// Is index == 0?
2294
// (The index field is typed as size_t.)
2295
const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
2296
2297
ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2298
cmpdi(CCR0, Rindex, 0);
2299
beq(CCR0, runtime); // If index == 0, goto runtime.
2300
ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
2301
2302
addi(Rindex, Rindex, -wordSize); // Decrement index.
2303
std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2304
2305
// Record the previous value.
2306
stdx(Rpre_val, Rbuffer, Rindex);
2307
b(filtered);
2308
2309
bind(runtime);
2310
2311
// May need to preserve LR. Also needed if current frame is not compatible with C calling convention.
2312
if (needs_frame) {
2313
save_LR_CR(Rtmp1);
2314
push_frame_reg_args(0, Rtmp2);
2315
}
2316
2317
if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
2318
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
2319
if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
2320
2321
if (needs_frame) {
2322
pop_frame();
2323
restore_LR_CR(Rtmp1);
2324
}
2325
2326
bind(filtered);
2327
}
2328
2329
// General G1 post-barrier generator
2330
// Store cross-region card.
2331
void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
2332
Label runtime, filtered_int;
2333
Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
2334
assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
2335
2336
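// G1 post-barrier, roughly:
//
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0)  return;  // same region
//   card = &byte_map_base[store_addr >> card_shift];
//   if (*card == g1_young_card_val)                           return;
//   StoreLoad;
//   if (*card == dirty_card_val)                              return;
//   *card = dirty_card_val;
//   enqueue card on the thread's dirty card queue, or call g1_wb_post if the queue is full.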
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
2337
assert(bs->kind() == BarrierSet::G1SATBCT ||
2338
bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
2339
2340
// Does store cross heap regions?
2341
if (G1RSBarrierRegionFilter) {
2342
xorr(Rtmp1, Rstore_addr, Rnew_val);
2343
srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
2344
beq(CCR0, filtered);
2345
}
2346
2347
// Crosses regions, storing NULL?
2348
#ifdef ASSERT
2349
cmpdi(CCR0, Rnew_val, 0);
2350
asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
2351
//beq(CCR0, filtered);
2352
#endif
2353
2354
// Storing region crossing non-NULL, is card already dirty?
2355
assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
2356
const Register Rcard_addr = Rtmp1;
2357
Register Rbase = Rtmp2;
2358
load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
2359
2360
srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
2361
2362
// Get the address of the card.
2363
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
2364
cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
2365
beq(CCR0, filtered);
2366
2367
membar(Assembler::StoreLoad);
2368
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
2369
cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
2370
beq(CCR0, filtered);
2371
2372
// Storing a region crossing, non-NULL oop, card is clean.
2373
// Dirty card and log.
2374
li(Rtmp3, CardTableModRefBS::dirty_card_val());
2375
//release(); // G1: oops are allowed to get visible after dirty marking.
2376
stbx(Rtmp3, Rbase, Rcard_addr);
2377
2378
add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
2379
Rbase = noreg; // end of lifetime
2380
2381
const Register Rqueue_index = Rtmp2,
2382
Rqueue_buf = Rtmp3;
2383
ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2384
cmpdi(CCR0, Rqueue_index, 0);
2385
beq(CCR0, runtime); // index == 0 then jump to runtime
2386
ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
2387
2388
addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
2389
std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2390
2391
stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
2392
b(filtered);
2393
2394
bind(runtime);
2395
2396
// Save the live input values.
2397
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
2398
2399
bind(filtered_int);
2400
}
2401
#endif // INCLUDE_ALL_GCS
2402
2403
// Values for last_Java_pc, and last_Java_sp must comply to the rules
2404
// in frame_ppc.hpp.
2405
void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
2406
// Always set last_Java_pc and flags first because once last_Java_sp
2407
// is visible has_last_Java_frame is true and users will look at the
2408
// rest of the fields. (Note: flags should always be zero before we
2409
// get here so doesn't need to be set.)
2410
2411
// Verify that last_Java_pc was zeroed on return to Java
2412
asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
2413
"last_Java_pc not zeroed before leaving Java", 0x200);
2414
2415
// When returning from calling out from Java mode the frame anchor's
2416
// last_Java_pc will always be set to NULL. It is set here so that
2417
// if we are doing a call to native (not VM) that we capture the
2418
// known pc and don't have to rely on the native call having a
2419
// standard frame linkage where we can find the pc.
2420
if (last_Java_pc != noreg)
2421
std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
2422
2423
// Set last_Java_sp last.
2424
std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
2425
}
2426
2427
void MacroAssembler::reset_last_Java_frame(void) {
2428
asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
2429
R16_thread, "SP was not set, still zero", 0x202);
2430
2431
BLOCK_COMMENT("reset_last_Java_frame {");
2432
li(R0, 0);
2433
2434
// _last_Java_sp = 0
2435
std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
2436
2437
// _last_Java_pc = 0
2438
std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
2439
BLOCK_COMMENT("} reset_last_Java_frame");
2440
}
2441
2442
void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
2443
assert_different_registers(sp, tmp1);
2444
2445
// sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
2446
// TOP_IJAVA_FRAME_ABI.
2447
// FIXME: assert that we really have a TOP_IJAVA_FRAME here!
2448
#ifdef CC_INTERP
2449
ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
2450
#else
2451
address entry = pc();
2452
load_const_optimized(tmp1, entry);
2453
#endif
2454
2455
set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
2456
}
2457
2458
void MacroAssembler::get_vm_result(Register oop_result) {
2459
// Read:
2460
// R16_thread
2461
// R16_thread->in_bytes(JavaThread::vm_result_offset())
2462
//
2463
// Updated:
2464
// oop_result
2465
// R16_thread->in_bytes(JavaThread::vm_result_offset())
2466
2467
ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
2468
li(R0, 0);
2469
std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
2470
2471
verify_oop(oop_result);
2472
}
2473
2474
void MacroAssembler::get_vm_result_2(Register metadata_result) {
2475
// Read:
2476
// R16_thread
2477
// R16_thread->in_bytes(JavaThread::vm_result_2_offset())
2478
//
2479
// Updated:
2480
// metadata_result
2481
// R16_thread->in_bytes(JavaThread::vm_result_2_offset())
2482
2483
ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
2484
li(R0, 0);
2485
std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
2486
}
2487
2488
2489
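// Compressed class pointer encoding, roughly:
//   narrow_klass = (klass - narrow_klass_base) >> narrow_klass_shift
// (decode_klass_not_null below performs the inverse.)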
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
2490
Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
2491
if (Universe::narrow_klass_base() != 0) {
2492
// Use dst as temp if it is free.
2493
load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
2494
sub(dst, current, R0);
2495
current = dst;
2496
}
2497
if (Universe::narrow_klass_shift() != 0) {
2498
srdi(dst, current, Universe::narrow_klass_shift());
2499
current = dst;
2500
}
2501
mr_if_needed(dst, current); // Move may be required.
2502
}
2503
2504
void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
2505
if (UseCompressedClassPointers) {
2506
encode_klass_not_null(ck, klass);
2507
stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
2508
} else {
2509
std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
2510
}
2511
}
2512
2513
void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
2514
if (UseCompressedClassPointers) {
2515
if (val == noreg) {
2516
val = R0;
2517
li(val, 0);
2518
}
2519
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
2520
}
2521
}
2522
2523
int MacroAssembler::instr_size_for_decode_klass_not_null() {
2524
if (!UseCompressedClassPointers) return 0;
2525
int num_instrs = 1; // shift or move
2526
if (Universe::narrow_klass_base() != 0) num_instrs = 7; // shift + load const + add
2527
return num_instrs * BytesPerInstWord;
2528
}
2529
2530
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
2531
assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
2532
if (src == noreg) src = dst;
2533
Register shifted_src = src;
2534
if (Universe::narrow_klass_shift() != 0 ||
2535
(Universe::narrow_klass_base() == 0 && src != dst)) { // Move required.
2536
shifted_src = dst;
2537
sldi(shifted_src, src, Universe::narrow_klass_shift());
2538
}
2539
if (Universe::narrow_klass_base() != 0) {
2540
load_const(R0, Universe::narrow_klass_base());
2541
add(dst, shifted_src, R0);
2542
}
2543
}
2544
2545
void MacroAssembler::load_klass(Register dst, Register src) {
2546
if (UseCompressedClassPointers) {
2547
lwz(dst, oopDesc::klass_offset_in_bytes(), src);
2548
// Attention: no null check here!
2549
decode_klass_not_null(dst, dst);
2550
} else {
2551
ld(dst, oopDesc::klass_offset_in_bytes(), src);
2552
}
2553
}
2554
2555
void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
2556
if (!os::zero_page_read_protected()) {
2557
if (TrapBasedNullChecks) {
2558
trap_null_check(src);
2559
}
2560
}
2561
load_klass(dst, src);
2562
}
2563
2564
void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
2565
if (Universe::heap() != NULL) {
2566
load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
2567
} else {
2568
// Heap not yet allocated. Load indirectly.
2569
int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
2570
ld(R30, simm16_offset, R30);
2571
}
2572
}
2573
2574
// Clear Array
2575
// Kills both input registers. tmp == R0 is allowed.
2576
void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
2577
// Procedure for large arrays (uses data cache block zero instruction).
2578
Label startloop, fast, fastloop, small_rest, restloop, done;
2579
const int cl_size = VM_Version::get_cache_line_size(),
2580
cl_dwords = cl_size>>3,
2581
cl_dw_addr_bits = exact_log2(cl_dwords),
2582
dcbz_min = 1; // Min count of dcbz executions, needs to be >0.
2583
2584
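// Outline, roughly:
//   1. If the count is too small, just use the dword store loop (small_rest).
//   2. Clear leading dwords until base_ptr is cache-line aligned (startloop).
//   3. Clear whole cache lines with dcbz (fastloop).
//   4. Clear the remaining dwords (restloop).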
//2:
2585
cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
2586
blt(CCR1, small_rest); // Too small.
2587
rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line.
2588
beq(CCR0, fast); // Already 128byte aligned.
2589
2590
subfic(tmp, tmp, cl_dwords);
2591
mtctr(tmp); // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
2592
subf(cnt_dwords, tmp, cnt_dwords); // rest.
2593
li(tmp, 0);
2594
//10:
2595
bind(startloop); // Clear at the beginning to reach 128byte boundary.
2596
std(tmp, 0, base_ptr); // Clear 8byte aligned block.
2597
addi(base_ptr, base_ptr, 8);
2598
bdnz(startloop);
2599
//13:
2600
bind(fast); // Clear 128byte blocks.
2601
srdi(tmp, cnt_dwords, cl_dw_addr_bits); // Loop count for 128byte loop (>0).
2602
andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
2603
mtctr(tmp); // Load counter.
2604
//16:
2605
bind(fastloop);
2606
dcbz(base_ptr); // Clear 128byte aligned block.
2607
addi(base_ptr, base_ptr, cl_size);
2608
bdnz(fastloop);
2609
if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
2610
//20:
2611
bind(small_rest);
2612
cmpdi(CCR0, cnt_dwords, 0); // size 0?
2613
beq(CCR0, done); // rest == 0
2614
li(tmp, 0);
2615
mtctr(cnt_dwords); // Load counter.
2616
//24:
2617
bind(restloop); // Clear rest.
2618
std(tmp, 0, base_ptr); // Clear 8byte aligned block.
2619
addi(base_ptr, base_ptr, 8);
2620
bdnz(restloop);
2621
//27:
2622
bind(done);
2623
}
2624
2625
/////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
2626
2627
// Search for a single jchar in an jchar[].
2628
//
2629
// Assumes that result differs from all other registers.
2630
//
2631
// Haystack, needle are the addresses of jchar-arrays.
2632
// NeedleChar is needle[0] if it is known at compile time.
2633
// Haycnt is the length of the haystack. We assume haycnt >=1.
2634
//
2635
// Preserves haystack, haycnt, kills all other registers.
2636
//
2637
// If needle == R0, we search for the constant needleChar.
2638
void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
2639
Register needle, jchar needleChar,
2640
Register tmp1, Register tmp2) {
2641
2642
assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
2643
2644
Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
2645
Register needle0 = needle, // Contains needle[0].
2646
addr = tmp1,
2647
ch1 = tmp2,
2648
ch2 = R0;
2649
2650
//2 (variable) or 3 (const):
2651
if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
2652
dcbtct(haystack, 0x00); // Indicate R/O access to haystack.
2653
2654
srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
2655
mr(addr, haystack);
2656
beq(CCR0, L_FinalCheck);
2657
mtctr(tmp2); // Move to count register.
2658
//8:
2659
bind(L_InnerLoop); // Main work horse (2x unrolled search loop).
2660
lhz(ch1, 0, addr); // Load characters from haystack.
2661
lhz(ch2, 2, addr);
2662
(needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
2663
(needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
2664
beq(CCR0, L_Found1); // Did we find the needle?
2665
beq(CCR1, L_Found2);
2666
addi(addr, addr, 4);
2667
bdnz(L_InnerLoop);
2668
//16:
2669
bind(L_FinalCheck);
2670
andi_(R0, haycnt, 1);
2671
beq(CCR0, L_NotFound);
2672
lhz(ch1, 0, addr); // One position left at which we have to compare.
2673
(needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
2674
beq(CCR1, L_Found3);
2675
//21:
2676
bind(L_NotFound);
2677
li(result, -1); // Not found.
2678
b(L_End);
2679
2680
bind(L_Found2);
2681
addi(addr, addr, 2);
2682
//24:
2683
bind(L_Found1);
2684
bind(L_Found3); // Return index ...
2685
subf(addr, haystack, addr); // relative to haystack,
2686
srdi(result, addr, 1); // in characters.
2687
bind(L_End);
2688
}
2689
2690
2691
// Implementation of IndexOf for jchar arrays.
2692
//
2693
// The length of haystack and needle are not constant, i.e. passed in a register.
2694
//
2695
// Preserves registers haystack, needle.
2696
// Kills registers haycnt, needlecnt.
2697
// Assumes that result differs from all other registers.
2698
// Haystack, needle are the addresses of jchar-arrays.
2699
// Haycnt, needlecnt are the lengths of them, respectively.
2700
//
2701
// Needlecntval must be zero or 15-bit unsigned immediate and > 1.
2702
void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
2703
Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
2704
Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
2705
2706
// Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
2707
Label L_TooShort, L_Found, L_NotFound, L_End;
2708
Register last_addr = haycnt, // Kill haycnt at the beginning.
2709
addr = tmp1,
2710
n_start = tmp2,
2711
ch1 = tmp3,
2712
ch2 = R0;
2713
2714
// **************************************************************************************************
2715
// Prepare for main loop: optimized for needle count >=2, bail out otherwise.
2716
// **************************************************************************************************
2717
2718
//1 (variable) or 3 (const):
2719
dcbtct(needle, 0x00); // Indicate R/O access to str1.
2720
dcbtct(haystack, 0x00); // Indicate R/O access to str2.
2721
2722
// Compute last haystack addr to use if no match gets found.
2723
if (needlecntval == 0) { // variable needlecnt
2724
//3:
2725
subf(ch1, needlecnt, haycnt); // Last character index to compare is haycnt-needlecnt.
2726
addi(addr, haystack, -2); // Accesses use pre-increment.
2727
cmpwi(CCR6, needlecnt, 2);
2728
blt(CCR6, L_TooShort); // Variable needlecnt: handle short needle separately.
2729
slwi(ch1, ch1, 1); // Scale to number of bytes.
2730
lwz(n_start, 0, needle); // Load first 2 characters of needle.
2731
add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
2732
addi(needlecnt, needlecnt, -2); // Rest of needle.
2733
} else { // constant needlecnt
2734
guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
2735
assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
2736
//5:
2737
addi(ch1, haycnt, -needlecntval); // Last character index to compare is haycnt-needlecnt.
2738
lwz(n_start, 0, needle); // Load first 2 characters of needle.
2739
addi(addr, haystack, -2); // Accesses use pre-increment.
2740
slwi(ch1, ch1, 1); // Scale to number of bytes.
2741
add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
2742
li(needlecnt, needlecntval-2); // Rest of needle.
2743
}
2744
2745
// Main Loop (now we have at least 3 characters).
2746
//11:
2747
Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
2748
bind(L_OuterLoop); // Search for 1st 2 characters.
2749
Register addr_diff = tmp4;
2750
subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
2751
addi(addr, addr, 2); // This is the new address we want to use for comparing.
2752
srdi_(ch2, addr_diff, 2);
2753
beq(CCR0, L_FinalCheck); // 2 characters left?
2754
mtctr(ch2); // addr_diff/4
2755
//16:
2756
bind(L_InnerLoop); // Main work horse (2x unrolled search loop)
2757
lwz(ch1, 0, addr); // Load 2 characters of haystack (ignore alignment).
2758
lwz(ch2, 2, addr);
2759
cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
2760
cmpw(CCR1, ch2, n_start);
2761
beq(CCR0, L_Comp1); // Did we find the needle start?
2762
beq(CCR1, L_Comp2);
2763
addi(addr, addr, 4);
2764
bdnz(L_InnerLoop);
2765
//24:
2766
bind(L_FinalCheck);
2767
rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
2768
beq(CCR0, L_NotFound);
2769
lwz(ch1, 0, addr); // One position left at which we have to compare.
2770
cmpw(CCR1, ch1, n_start);
2771
beq(CCR1, L_Comp3);
2772
//29:
2773
bind(L_NotFound);
2774
li(result, -1); // not found
2775
b(L_End);
2776
2777
2778
// **************************************************************************************************
2779
// Special Case: unfortunately, the variable needle case can be called with needlecnt<2
2780
// **************************************************************************************************
2781
//31:
2782
if ((needlecntval>>1) !=1 ) { // Const needlecnt is 2 or 3? Reduce code size.
2783
int nopcnt = 5;
2784
if (needlecntval !=0 ) ++nopcnt; // Balance alignment (other case: see below).
2785
if (needlecntval == 0) { // We have to handle these cases separately.
2786
Label L_OneCharLoop;
2787
bind(L_TooShort);
2788
mtctr(haycnt);
2789
lhz(n_start, 0, needle); // First character of needle
2790
bind(L_OneCharLoop);
2791
lhzu(ch1, 2, addr);
2792
cmpw(CCR1, ch1, n_start);
2793
beq(CCR1, L_Found); // Did we find the one character needle?
2794
bdnz(L_OneCharLoop);
2795
li(result, -1); // Not found.
2796
b(L_End);
2797
} // 8 instructions, so no impact on alignment.
2798
for (int x = 0; x < nopcnt; ++x) nop();
2799
}
2800
2801
// **************************************************************************************************
2802
// Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
2803
// **************************************************************************************************
2804
2805
// Compare the rest
2806
//36 if needlecntval==0, else 37:
2807
bind(L_Comp2);
2808
addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
2809
bind(L_Comp1); // Addr points to possible needle start.
2810
bind(L_Comp3); // Could have created a copy and used a different return address, but we save code size here.
2811
if (needlecntval != 2) { // Const needlecnt==2?
2812
if (needlecntval != 3) {
2813
if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
2814
Register ind_reg = tmp4;
2815
li(ind_reg, 2*2); // First 2 characters are already compared, use index 2.
2816
mtctr(needlecnt); // Decremented by 2, still > 0.
2817
//40:
2818
Label L_CompLoop;
2819
bind(L_CompLoop);
2820
lhzx(ch2, needle, ind_reg);
2821
lhzx(ch1, addr, ind_reg);
2822
cmpw(CCR1, ch1, ch2);
2823
bne(CCR1, L_OuterLoop);
2824
addi(ind_reg, ind_reg, 2);
2825
bdnz(L_CompLoop);
2826
} else { // No loop required if there's only one needle character left.
2827
lhz(ch2, 2*2, needle);
2828
lhz(ch1, 2*2, addr);
2829
cmpw(CCR1, ch1, ch2);
2830
bne(CCR1, L_OuterLoop);
2831
}
2832
}
2833
// Return index ...
2834
//46:
2835
bind(L_Found);
2836
subf(addr, haystack, addr); // relative to haystack, ...
2837
srdi(result, addr, 1); // in characters.
2838
//48:
2839
bind(L_End);
2840
}
2841
2842
// Implementation of Compare for jchar arrays.
2843
//
2844
// Kills the registers str1, str2, cnt1, cnt2.
2845
// Kills cr0, ctr.
2846
// Assumes that result differs from the input registers.
2847
void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
2848
Register result_reg, Register tmp_reg) {
2849
assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
2850
2851
Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
2852
Register cnt_diff = R0,
2853
limit_reg = cnt1_reg,
2854
chr1_reg = result_reg,
2855
chr2_reg = cnt2_reg,
2856
addr_diff = str2_reg;
2857
2858
// Offset 0 should be 32 byte aligned.
2859
//-4:
2860
dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
2861
dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
2862
//-2:
2863
// Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
2864
subf(result_reg, cnt2_reg, cnt1_reg); // difference between cnt1/2
2865
subf_(addr_diff, str1_reg, str2_reg); // alias?
2866
beq(CCR0, Ldone); // return cnt difference if both strings share the same address (identical)
2867
srawi(limit_reg, result_reg, 31); // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
2868
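// Branch-free min: limit_reg = cnt2 + ((cnt1 - cnt2) & signmask(cnt1 - cnt2)) == min(cnt1, cnt2);
// cnt_diff keeps cnt1 - cnt2 for the final result.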
mr(cnt_diff, result_reg);
2869
andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
2870
add_(limit_reg, cnt2_reg, limit_reg); // min(cnt1, cnt2)==0?
2871
beq(CCR0, Ldone); // return cnt difference if one has 0 length
2872
2873
lhz(chr1_reg, 0, str1_reg); // optional: early out if first characters mismatch
2874
lhzx(chr2_reg, str1_reg, addr_diff); // optional: early out if first characters mismatch
2875
addi(tmp_reg, limit_reg, -1); // min(cnt1, cnt2)-1
2876
subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
2877
bne(CCR0, Ldone); // optional: early out if first characters mismatch
2878
2879
// Set loop counter by scaling down tmp_reg
2880
srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
2881
ble(CCR0, Lslow_case); // need >4 characters for fast loop
2882
andi(limit_reg, tmp_reg, 4-1); // remaining characters
2883
2884
// Adapt str1_reg str2_reg for the first loop iteration
2885
mtctr(chr2_reg); // (min(cnt1, cnt2)-1)/4
2886
addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
2887
//16:
2888
// Compare the rest of the characters
2889
bind(Lfast_loop);
2890
ld(chr1_reg, 0, str1_reg);
2891
ldx(chr2_reg, str1_reg, addr_diff);
2892
cmpd(CCR0, chr2_reg, chr1_reg);
2893
bne(CCR0, Lslow_case); // return chr1_reg
2894
addi(str1_reg, str1_reg, 4*2);
2895
bdnz(Lfast_loop);
2896
addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
2897
//23:
2898
bind(Lslow_case);
2899
mtctr(limit_reg);
2900
//24:
2901
bind(Lslow_loop);
2902
lhz(chr1_reg, 0, str1_reg);
2903
lhzx(chr2_reg, str1_reg, addr_diff);
2904
subf_(result_reg, chr2_reg, chr1_reg);
2905
bne(CCR0, Ldone); // return chr1_reg
2906
addi(str1_reg, str1_reg, 1*2);
2907
bdnz(Lslow_loop);
2908
//30:
2909
// If strings are equal up to min length, return the length difference.
2910
mr(result_reg, cnt_diff);
2911
nop(); // alignment
2912
//32:
2913
// Otherwise, return the difference between the first mismatched chars.
2914
bind(Ldone);
2915
}
2916
2917
2918
// Compare char[] arrays.
2919
//
2920
// str1_reg USE only
2921
// str2_reg USE only
2922
// cnt_reg USE_DEF, due to tmp reg shortage
2923
// result_reg DEF only, might compromise USE only registers
2924
void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
2925
Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
2926
Register tmp5_reg) {
2927
2928
// Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
2929
assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
2930
assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
2931
2932
// Offset 0 should be 32 byte aligned.
2933
Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
2934
Register index_reg = tmp5_reg;
2935
Register cbc_iter = tmp4_reg;
2936
2937
//-1:
2938
dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
2939
dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
2940
//1:
2941
andi(cbc_iter, cnt_reg, 4-1); // Remaining iterations after 4 java characters per iteration loop.
2942
li(index_reg, 0); // init
2943
li(result_reg, 0); // assume false
2944
srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
2945
2946
cmpwi(CCR1, cbc_iter, 0); // CCR1 = (cbc_iter==0)
2947
beq(CCR0, Linit_cbc); // too short
2948
mtctr(tmp2_reg);
2949
//8:
2950
bind(Lloop);
2951
ldx(tmp1_reg, str1_reg, index_reg);
2952
ldx(tmp2_reg, str2_reg, index_reg);
2953
cmpd(CCR0, tmp1_reg, tmp2_reg);
2954
bne(CCR0, Ldone_false); // Unequal char pair found -> done.
2955
addi(index_reg, index_reg, 4*sizeof(jchar));
2956
bdnz(Lloop);
2957
//14:
2958
bind(Linit_cbc);
2959
beq(CCR1, Ldone_true);
2960
mtctr(cbc_iter);
2961
//16:
2962
bind(Lcbc);
2963
lhzx(tmp1_reg, str1_reg, index_reg);
2964
lhzx(tmp2_reg, str2_reg, index_reg);
2965
cmpw(CCR0, tmp1_reg, tmp2_reg);
2966
bne(CCR0, Ldone_false); // Unequal char pair found -> done.
2967
addi(index_reg, index_reg, 1*sizeof(jchar));
2968
bdnz(Lcbc);
2969
nop();
2970
bind(Ldone_true);
2971
li(result_reg, 1);
2972
//24:
2973
bind(Ldone_false);
2974
}
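// Illustrative sketch (not part of the original source): the chunked equality
// test above in plain C. The main loop compares 4 jchars per iteration (the
// single 8-byte ldx/cmpd, written here as memcmp); the remainder loop compares
// one jchar at a time.
//
//   bool char_arrays_equal(const jchar* s1, const jchar* s2, int cnt) {
//     int i = 0;
//     for (; i + 4 <= cnt; i += 4) {
//       if (memcmp(s1 + i, s2 + i, 4 * sizeof(jchar)) != 0) return false;
//     }
//     for (; i < cnt; i++) {
//       if (s1[i] != s2[i]) return false;
//     }
//     return true;
//   }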
2975
2976
2977
void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
2978
Register tmp1_reg, Register tmp2_reg) {
2979
// Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
2980
assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
2981
assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
2982
assert(sizeof(jchar) == 2, "must be");
2983
assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
2984
2985
Label Ldone_false;
2986
2987
if (cntval < 16) { // short case
2988
if (cntval != 0) li(result_reg, 0); // assume false
2989
2990
const int num_bytes = cntval*sizeof(jchar);
2991
int index = 0;
2992
for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
2993
ld(tmp1_reg, index, str1_reg);
2994
ld(tmp2_reg, index, str2_reg);
2995
cmpd(CCR0, tmp1_reg, tmp2_reg);
2996
bne(CCR0, Ldone_false);
2997
}
2998
if (cntval & 2) {
2999
lwz(tmp1_reg, index, str1_reg);
3000
lwz(tmp2_reg, index, str2_reg);
3001
cmpw(CCR0, tmp1_reg, tmp2_reg);
3002
bne(CCR0, Ldone_false);
3003
index += 4;
3004
}
3005
if (cntval & 1) {
3006
lhz(tmp1_reg, index, str1_reg);
3007
lhz(tmp2_reg, index, str2_reg);
3008
cmpw(CCR0, tmp1_reg, tmp2_reg);
3009
bne(CCR0, Ldone_false);
3010
}
3011
// fallthrough: true
3012
} else {
3013
Label Lloop;
3014
Register index_reg = tmp1_reg;
3015
const int loopcnt = cntval/4;
3016
assert(loopcnt > 0, "must be");
3017
// Offset 0 should be 32 byte aligned.
3018
//2:
3019
dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
3020
dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
3021
li(tmp2_reg, loopcnt);
3022
li(index_reg, 0); // init
3023
li(result_reg, 0); // assume false
3024
mtctr(tmp2_reg);
3025
//8:
3026
bind(Lloop);
3027
ldx(R0, str1_reg, index_reg);
3028
ldx(tmp2_reg, str2_reg, index_reg);
3029
cmpd(CCR0, R0, tmp2_reg);
3030
bne(CCR0, Ldone_false); // Unequal char pair found -> done.
3031
addi(index_reg, index_reg, 4*sizeof(jchar));
3032
bdnz(Lloop);
3033
//14:
3034
if (cntval & 2) {
3035
lwzx(R0, str1_reg, index_reg);
3036
lwzx(tmp2_reg, str2_reg, index_reg);
3037
cmpw(CCR0, R0, tmp2_reg);
3038
bne(CCR0, Ldone_false);
3039
if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
3040
}
3041
if (cntval & 1) {
3042
lhzx(R0, str1_reg, index_reg);
3043
lhzx(tmp2_reg, str2_reg, index_reg);
3044
cmpw(CCR0, R0, tmp2_reg);
3045
bne(CCR0, Ldone_false);
3046
}
3047
// fallthru: true
3048
}
3049
li(result_reg, 1);
3050
bind(Ldone_false);
3051
}
3052
3053
// Helpers for Intrinsic Emitters
3054
//
3055
// Revert the byte order of a 32bit value in a register
3056
// src: 0x44556677
3057
// dst: 0x77665544
3058
// Three steps to obtain the result:
3059
// 1) Rotate src (as doubleword) left 5 bytes. That puts the leftmost byte of the src word
3060
// into the rightmost byte position. Afterwards, everything left of the rightmost byte is cleared.
3061
// This value initializes dst.
3062
// 2) Rotate src (as word) left 3 bytes. That puts the rightmost byte of the src word into the leftmost
3063
// byte position. Furthermore, byte 5 is rotated into byte 6 position where it is supposed to go.
3064
// This value is mask inserted into dst with a [0..23] mask of 1s.
3065
// 3) Rotate src (as word) left 1 byte. That puts byte 6 into byte 5 position.
3066
// This value is mask inserted into dst with a [8..15] mask of 1s.
3067
void MacroAssembler::load_reverse_32(Register dst, Register src) {
3068
assert_different_registers(dst, src);
3069
3070
rldicl(dst, src, (4+1)*8, 56); // Rotate byte 4 into position 7 (rightmost), clear all to the left.
3071
rlwimi(dst, src, 3*8, 0, 23); // Insert byte 5 into position 6, 7 into 4, leave pos 7 alone.
3072
rlwimi(dst, src, 1*8, 8, 15); // Insert byte 6 into position 5, leave the rest alone.
3073
}
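// Illustrative sketch (not part of the original source): the same 32-bit byte
// reversal in plain C. The three rotate/insert instructions above implement
// this without needing a scratch register besides dst.
//
//   uint32_t reverse_bytes_32(uint32_t src) {
//     return ((src & 0x000000ff) << 24) |
//            ((src & 0x0000ff00) <<  8) |
//            ((src & 0x00ff0000) >>  8) |
//            ((src & 0xff000000) >> 24);
//   }
//
// For the example above: reverse_bytes_32(0x44556677) == 0x77665544.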
3074
3075
// Calculate the column addresses of the crc32 lookup table into distinct registers.
3076
// This loop-invariant calculation is moved out of the loop body, reducing the loop
3077
// body size from 20 to 16 instructions.
3078
// Returns the offset that was used to calculate the address of column tc3.
3079
// Due to register shortage, setting tc3 may overwrite table. With the return offset
3080
// at hand, the original table address can be easily reconstructed.
3081
int MacroAssembler::crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3) {
3082
3083
#ifdef VM_LITTLE_ENDIAN
3084
// This is what we implement (the DOLIT4 part):
3085
// =========================================================================
3086
// #define DOLIT4 c ^= *buf4++; \
3087
// c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
3088
// crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
3089
// #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
3090
// =========================================================================
3091
const int ix0 = 3*(4*CRC32_COLUMN_SIZE);
3092
const int ix1 = 2*(4*CRC32_COLUMN_SIZE);
3093
const int ix2 = 1*(4*CRC32_COLUMN_SIZE);
3094
const int ix3 = 0*(4*CRC32_COLUMN_SIZE);
3095
#else
3096
// This is what we implement (the DOBIG4 part):
3097
// =========================================================================
3098
// #define DOBIG4 c ^= *++buf4; \
3099
// c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
3100
// crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
3101
// #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
3102
// =========================================================================
3103
const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
3104
const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
3105
const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
3106
const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
3107
#endif
3108
assert_different_registers(table, tc0, tc1, tc2);
3109
assert(table == tc3, "must be!");
3110
3111
if (ix0 != 0) addi(tc0, table, ix0);
3112
if (ix1 != 0) addi(tc1, table, ix1);
3113
if (ix2 != 0) addi(tc2, table, ix2);
3114
if (ix3 != 0) addi(tc3, table, ix3);
3115
3116
return ix3;
3117
}
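// Illustrative sketch (not part of the original source): the column addresses
// computed above, in plain C with byte pointers. Each column occupies
// 4*CRC32_COLUMN_SIZE bytes; since tc3 may overwrite 'table', the caller can
// restore the table base from the returned offset.
//
//   const char* base = (const char*) table;
//   const char* tc0  = base + ix0;
//   const char* tc1  = base + ix1;
//   const char* tc2  = base + ix2;
//   const char* tc3  = base + ix3;   // may reuse the 'table' register
//   // tail loop later restores:  table = tc3 - ix3;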
3118
3119
/**
3120
* uint32_t crc;
3121
* timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
3122
*/
3123
void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
3124
assert_different_registers(crc, table, tmp);
3125
assert_different_registers(val, table);
3126
3127
if (crc == val) { // Must rotate first to use the unmodified value.
3128
rlwinm(tmp, val, 2, 24-2, 31-2); // Insert (rightmost) byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
3129
// As we use a word (4-byte) instruction, we have to adapt the mask bit positions.
3130
srwi(crc, crc, 8); // Unsigned shift, clear leftmost 8 bits.
3131
} else {
3132
srwi(crc, crc, 8); // Unsigned shift, clear leftmost 8 bits.
3133
rlwinm(tmp, val, 2, 24-2, 31-2); // Insert (rightmost) byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
3134
}
3135
lwzx(tmp, table, tmp);
3136
xorr(crc, crc, tmp);
3137
}
3138
3139
/**
3140
* uint32_t crc;
3141
* timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
3142
*/
3143
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
3144
fold_byte_crc32(crc, crc, table, tmp);
3145
}
3146
3147
/**
3148
* Emits code to update CRC-32 with a byte value according to constants in table.
3149
*
3150
* @param [in,out]crc Register containing the crc.
3151
* @param [in]val Register containing the byte to fold into the CRC.
3152
* @param [in]table Register containing the table of crc constants.
3153
*
3154
* uint32_t crc;
3155
* val = crc_table[(val ^ crc) & 0xFF];
3156
* crc = val ^ (crc >> 8);
3157
*/
3158
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
3159
BLOCK_COMMENT("update_byte_crc32:");
3160
xorr(val, val, crc);
3161
fold_byte_crc32(crc, val, table, val);
3162
}
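// Illustrative sketch (not part of the original source): the byte update above
// in plain C, matching the formula in the comment. The '<< 2' scaling of the
// table index is done in fold_byte_crc32 via rlwinm; with a uint32_t* table it
// is simply array indexing.
//
//   uint32_t update_byte(uint32_t crc, uint8_t val, const uint32_t* table) {
//     return table[(val ^ crc) & 0xff] ^ (crc >> 8);
//   }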
3163
3164
/**
3165
* @param crc register containing existing CRC (32-bit)
3166
* @param buf register pointing to input byte buffer (byte*)
3167
* @param len register containing number of bytes
3168
* @param table register pointing to CRC table
3169
*/
3170
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
3171
Register data, bool loopAlignment, bool invertCRC) {
3172
assert_different_registers(crc, buf, len, table, data);
3173
3174
Label L_mainLoop, L_done;
3175
const int mainLoop_stepping = 1;
3176
const int mainLoop_alignment = loopAlignment ? 32 : 4; // (InputForNewCode > 4 ? InputForNewCode : 32) : 4;
3177
3178
// Process all bytes in a single-byte loop.
3179
cmpdi(CCR0, len, 0); // Anything to do?
3180
mtctr(len);
3181
beq(CCR0, L_done);
3182
3183
if (invertCRC) {
3184
nand(crc, crc, crc); // ~c
3185
}
3186
3187
align(mainLoop_alignment);
3188
BIND(L_mainLoop);
3189
lbz(data, 0, buf); // Byte from buffer, zero-extended.
3190
addi(buf, buf, mainLoop_stepping); // Advance buffer position.
3191
update_byte_crc32(crc, data, table);
3192
bdnz(L_mainLoop); // Iterate.
3193
3194
if (invertCRC) {
3195
nand(crc, crc, crc); // ~c
3196
}
3197
3198
bind(L_done);
3199
}
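// Illustrative sketch (not part of the original source): the single-byte loop
// above in plain C. 'invertCRC' corresponds to the optional ~crc before and
// after the loop; for len == 0 the crc is returned unchanged, as in the
// emitted code.
//
//   uint32_t byte_loop(uint32_t crc, const uint8_t* buf, size_t len,
//                      const uint32_t* table, bool invertCRC) {
//     if (len == 0) return crc;
//     if (invertCRC) crc = ~crc;
//     for (size_t i = 0; i < len; i++) {
//       crc = table[(buf[i] ^ crc) & 0xff] ^ (crc >> 8);
//     }
//     if (invertCRC) crc = ~crc;
//     return crc;
//   }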
3200
3201
/**
3202
* Emits code to update CRC-32 with a 4-byte value according to constants in table
3203
* Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c
3204
*/
3205
// A note on the lookup table address(es):
3206
// The lookup table consists of two sets of four columns each.
3207
// The columns {0..3} are used for little-endian machines.
3208
// The columns {4..7} are used for big-endian machines.
3209
// To save the effort of adding the column offset to the table address each time
3210
// a table element is looked up, it is possible to pass the pre-calculated
3211
// column addresses.
3212
// Uses R9..R12 as work registers. Must be saved/restored by caller, if necessary.
3213
void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
3214
Register t0, Register t1, Register t2, Register t3,
3215
Register tc0, Register tc1, Register tc2, Register tc3) {
3216
assert_different_registers(crc, t3);
3217
3218
// XOR crc with next four bytes of buffer.
3219
lwz(t3, bufDisp, buf);
3220
if (bufInc != 0) {
3221
addi(buf, buf, bufInc);
3222
}
3223
xorr(t3, t3, crc);
3224
3225
// Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
3226
rlwinm(t0, t3, 2, 24-2, 31-2); // ((t3 >> 0) & 0xff) << 2
3227
rlwinm(t1, t3, 32+(2- 8), 24-2, 31-2); // ((t3 >> 8) & 0xff) << 2
3228
rlwinm(t2, t3, 32+(2-16), 24-2, 31-2); // ((t3 >> 16) & 0xff) << 2
3229
rlwinm(t3, t3, 32+(2-24), 24-2, 31-2); // ((t3 >> 24) & 0xff) << 2
3230
3231
// Use the pre-calculated column addresses.
3232
// Load pre-calculated table values.
3233
lwzx(t0, tc0, t0);
3234
lwzx(t1, tc1, t1);
3235
lwzx(t2, tc2, t2);
3236
lwzx(t3, tc3, t3);
3237
3238
// Calculate new crc from table values.
3239
xorr(t0, t0, t1);
3240
xorr(t2, t2, t3);
3241
xorr(crc, t0, t2); // Now crc contains the final checksum value.
3242
}
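// Illustrative sketch (not part of the original source): one 4-byte step of
// the table-driven CRC as emitted above, in plain C for the little-endian
// layout. tc0..tc3 are the uint32_t* column bases set up by
// crc32_table_columns (the '<< 2' in the rlwinm is the *4 index scaling).
//
//   uint32_t c = crc ^ *(const uint32_t*) (buf + bufDisp);
//   crc = tc0[(c >>  0) & 0xff] ^ tc1[(c >>  8) & 0xff]
//       ^ tc2[(c >> 16) & 0xff] ^ tc3[(c >> 24) & 0xff];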
3243
3244
/**
3245
* @param crc register containing existing CRC (32-bit)
3246
* @param buf register pointing to input byte buffer (byte*)
3247
* @param len register containing number of bytes
3248
* @param table register pointing to CRC table
3249
*
3250
* Uses R9..R12 as work registers. Must be saved/restored by caller!
3251
*/
3252
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
3253
Register t0, Register t1, Register t2, Register t3,
3254
Register tc0, Register tc1, Register tc2, Register tc3) {
3255
assert_different_registers(crc, buf, len, table);
3256
3257
Label L_mainLoop, L_tail;
3258
Register tmp = t0;
3259
Register data = t0;
3260
Register tmp2 = t1;
3261
const int mainLoop_stepping = 8;
3262
const int tailLoop_stepping = 1;
3263
const int log_stepping = exact_log2(mainLoop_stepping);
3264
const int mainLoop_alignment = 32; // InputForNewCode > 4 ? InputForNewCode : 32;
3265
const int complexThreshold = 2*mainLoop_stepping;
3266
3267
// Don't test for len <= 0 here. This pathological case should not occur anyway.
3268
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
3269
// The situation itself is detected and handled correctly by the conditional branches
3270
// following the len adjustments by -stepping and +stepping.
3271
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
3272
3273
BLOCK_COMMENT("kernel_crc32_2word {");
3274
3275
nand(crc, crc, crc); // ~c
3276
3277
// Check for short (<mainLoop_stepping) buffer.
3278
cmpdi(CCR0, len, complexThreshold);
3279
blt(CCR0, L_tail);
3280
3281
// Pre-mainLoop alignment did show a slight (1%) positive effect on performance.
3282
// We leave the code in for reference. Maybe we need alignment when we exploit vector instructions.
3283
{
3284
// Align buf addr to mainLoop_stepping boundary.
3285
neg(tmp2, buf); // Calculate # preLoop iterations for alignment.
3286
rldicl(tmp2, tmp2, 0, 64-log_stepping); // Keep only the low log_stepping bits, i.e. (-buf) mod mainLoop_stepping.
3287
3288
if (complexThreshold > mainLoop_stepping) {
3289
sub(len, len, tmp2); // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
3290
} else {
3291
sub(tmp, len, tmp2); // Remaining bytes for main loop.
3292
cmpdi(CCR0, tmp, mainLoop_stepping);
3293
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
3294
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
3295
}
3296
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
3297
}
3298
3299
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
3300
andi(len, len, mainLoop_stepping-1); // remaining bytes for tailLoop
3301
mtctr(tmp2);
3302
3303
#ifdef VM_LITTLE_ENDIAN
3304
Register crc_rv = crc;
3305
#else
3306
Register crc_rv = tmp; // Load_reverse needs separate registers to work on.
3307
// Occupies tmp, but frees up crc.
3308
load_reverse_32(crc_rv, crc); // Revert byte order because we are dealing with big-endian data.
3309
tmp = crc;
3310
#endif
3311
3312
int reconstructTableOffset = crc32_table_columns(table, tc0, tc1, tc2, tc3);
3313
3314
align(mainLoop_alignment); // Octoword-aligned loop address. Shows 2% improvement.
3315
BIND(L_mainLoop);
3316
update_1word_crc32(crc_rv, buf, table, 0, 0, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
3317
update_1word_crc32(crc_rv, buf, table, 4, mainLoop_stepping, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
3318
bdnz(L_mainLoop);
3319
3320
#ifndef VM_LITTLE_ENDIAN
3321
load_reverse_32(crc, crc_rv); // Revert byte order because we are dealing with big-endian data.
3322
tmp = crc_rv; // Tmp uses its original register again.
3323
#endif
3324
3325
// Restore original table address for tailLoop.
3326
if (reconstructTableOffset != 0) {
3327
addi(table, table, -reconstructTableOffset);
3328
}
3329
3330
// Process last few (<complexThreshold) bytes of buffer.
3331
BIND(L_tail);
3332
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
3333
3334
nand(crc, crc, crc); // ~c
3335
BLOCK_COMMENT("} kernel_crc32_2word");
3336
}
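// Illustrative sketch (not part of the original source): the pre-loop
// alignment count computed above with neg + rldicl, in plain C. It is the
// number of leading bytes processed one at a time so that 'buf' becomes
// aligned to mainLoop_stepping before the word loop starts.
//
//   size_t prealign = (size_t) (-(uintptr_t) buf) & (mainLoop_stepping - 1);
//   // byte loop over 'prealign' bytes, word loop over the aligned middle,
//   // byte loop over the remaining 'len & (mainLoop_stepping - 1)' bytes.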
3337
3338
/**
3339
* @param crc register containing existing CRC (32-bit)
3340
* @param buf register pointing to input byte buffer (byte*)
3341
* @param len register containing number of bytes
3342
* @param table register pointing to CRC table
3343
*
3344
* uses R9..R12 as work register. Must be saved/restored by caller!
3345
*/
3346
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
3347
Register t0, Register t1, Register t2, Register t3,
3348
Register tc0, Register tc1, Register tc2, Register tc3) {
3349
assert_different_registers(crc, buf, len, table);
3350
3351
Label L_mainLoop, L_tail;
3352
Register tmp = t0;
3353
Register data = t0;
3354
Register tmp2 = t1;
3355
const int mainLoop_stepping = 4;
3356
const int tailLoop_stepping = 1;
3357
const int log_stepping = exact_log2(mainLoop_stepping);
3358
const int mainLoop_alignment = 32; // InputForNewCode > 4 ? InputForNewCode : 32;
3359
const int complexThreshold = 2*mainLoop_stepping;
3360
3361
// Don't test for len <= 0 here. This pathological case should not occur anyway.
3362
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
3363
// The situation itself is detected and handled correctly by the conditional branches
3364
// following the len adjustments by -stepping and +stepping.
3365
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
3366
3367
BLOCK_COMMENT("kernel_crc32_1word {");
3368
3369
nand(crc, crc, crc); // ~c
3370
3371
// Check for short (<mainLoop_stepping) buffer.
3372
cmpdi(CCR0, len, complexThreshold);
3373
blt(CCR0, L_tail);
3374
3375
// Pre-mainLoop alignment did show a slight (1%) positive effect on performance.
3376
// We leave the code in for reference. Maybe we need alignment when we exploit vector instructions.
3377
{
3378
// Align buf addr to mainLoop_stepping boundary.
3379
neg(tmp2, buf); // Calculate # preLoop iterations for alignment.
3380
rldicl(tmp2, tmp2, 0, 64-log_stepping); // Keep only the low log_stepping bits, i.e. (-buf) mod mainLoop_stepping.
3381
3382
if (complexThreshold > mainLoop_stepping) {
3383
sub(len, len, tmp2); // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
3384
} else {
3385
sub(tmp, len, tmp2); // Remaining bytes for main loop.
3386
cmpdi(CCR0, tmp, mainLoop_stepping);
3387
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
3388
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
3389
}
3390
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
3391
}
3392
3393
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
3394
andi(len, len, mainLoop_stepping-1); // remaining bytes for tailLoop
3395
mtctr(tmp2);
3396
3397
#ifdef VM_LITTLE_ENDIAN
3398
Register crc_rv = crc;
3399
#else
3400
Register crc_rv = tmp; // Load_reverse needs separate registers to work on.
3401
// Occupies tmp, but frees up crc.
3402
load_reverse_32(crc_rv, crc); // Revert byte order because we are dealing with big-endian data.
3403
tmp = crc;
3404
#endif
3405
3406
int reconstructTableOffset = crc32_table_columns(table, tc0, tc1, tc2, tc3);
3407
3408
align(mainLoop_alignment); // Octoword-aligned loop address. Shows 2% improvement.
3409
BIND(L_mainLoop);
3410
update_1word_crc32(crc_rv, buf, table, 0, mainLoop_stepping, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
3411
bdnz(L_mainLoop);
3412
3413
#ifndef VM_LITTLE_ENDIAN
3414
load_reverse_32(crc, crc_rv); // Revert byte order because we are dealing with big-endian data.
3415
tmp = crc_rv; // Tmp uses its original register again.
3416
#endif
3417
3418
// Restore original table address for tailLoop.
3419
if (reconstructTableOffset != 0) {
3420
addi(table, table, -reconstructTableOffset);
3421
}
3422
3423
// Process last few (<complexThreshold) bytes of buffer.
3424
BIND(L_tail);
3425
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
3426
3427
nand(crc, crc, crc); // ~c
3428
BLOCK_COMMENT("} kernel_crc32_1word");
3429
}
3430
3431
/**
3432
* @param crc register containing existing CRC (32-bit)
3433
* @param buf register pointing to input byte buffer (byte*)
3434
* @param len register containing number of bytes
3435
* @param table register pointing to CRC table
3436
*
3437
* Uses R7_ARG5, R8_ARG6 as work registers.
3438
*/
3439
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
3440
Register t0, Register t1, Register t2, Register t3) {
3441
assert_different_registers(crc, buf, len, table);
3442
3443
Register data = t0; // Holds the current byte to be folded into crc.
3444
3445
BLOCK_COMMENT("kernel_crc32_1byte {");
3446
3447
// Process all bytes in a single-byte loop.
3448
update_byteLoop_crc32(crc, buf, len, table, data, true, true);
3449
3450
BLOCK_COMMENT("} kernel_crc32_1byte");
3451
}
3452
3453
/**
3454
* @param crc register containing existing CRC (32-bit)
3455
* @param buf register pointing to input byte buffer (byte*)
3456
* @param len register containing number of bytes
3457
* @param table register pointing to CRC table
3458
* @param constants register pointing to CRC table for 128-bit aligned memory
3459
* @param barretConstants register pointing to table for barrett reduction
3460
* @param t0 volatile register
3461
* @param t1 volatile register
3462
* @param t2 volatile register
3463
* @param t3 volatile register
3464
*/
3465
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
3466
Register constants, Register barretConstants,
3467
Register t0, Register t1, Register t2, Register t3, Register t4) {
3468
assert_different_registers(crc, buf, len, table);
3469
3470
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
3471
3472
Register prealign = t0;
3473
Register postalign = t0;
3474
3475
BLOCK_COMMENT("kernel_crc32_1word_vpmsumb {");
3476
3477
// 1. Use kernel_crc32_1word for buffers shorter than 384 bits.
3478
clrldi(len, len, 32);
3479
cmpdi(CCR0, len, 384);
3480
bge(CCR0, L_start);
3481
3482
Register tc0 = t4;
3483
Register tc1 = constants;
3484
Register tc2 = barretConstants;
3485
kernel_crc32_1word(crc, buf, len, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
3486
b(L_end);
3487
3488
BIND(L_start);
3489
3490
// 2. ~c
3491
nand(crc, crc, crc);
3492
3493
// 3. calculate from 0 to first 128bit-aligned address
3494
clrldi_(prealign, buf, 57);
3495
beq(CCR0, L_alignedHead);
3496
3497
subfic(prealign, prealign, 128);
3498
3499
subf(len, prealign, len);
3500
update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false);
3501
3502
// 4. calculate from first 128bit-aligned address to last 128bit-aligned address
3503
BIND(L_alignedHead);
3504
3505
clrldi(postalign, len, 57);
3506
subf(len, postalign, len);
3507
3508
// len must be more than 256 bits
3509
kernel_crc32_1word_aligned(crc, buf, len, constants, barretConstants, t1, t2, t3);
3510
3511
// 5. calculate remaining
3512
cmpdi(CCR0, postalign, 0);
3513
beq(CCR0, L_tail);
3514
3515
update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false);
3516
3517
BIND(L_tail);
3518
3519
// 6. ~c
3520
nand(crc, crc, crc);
3521
3522
BIND(L_end);
3523
3524
BLOCK_COMMENT("} kernel_crc32_1word_vpmsumb");
3525
}
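// Illustrative sketch (not part of the original source): the control flow of
// kernel_crc32_1word_vpmsumd above, in pseudo-C. byte_loop/vector_loop are
// placeholders for update_byteLoop_crc32 and kernel_crc32_1word_aligned.
//
//   if (len < 384) return kernel_crc32_1word(crc, buf, len, ...); // 1. short input
//   crc = ~crc;                                                   // 2.
//   crc = byte_loop(crc, buf, bytes_to_128bit_alignment);         // 3. unaligned head
//   crc = vector_loop(crc, aligned_buf, aligned_len);             // 4. vpmsumd core
//   crc = byte_loop(crc, tail_buf, tail_len);                     // 5. unaligned tail
//   return ~crc;                                                  // 6.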
3526
3527
/**
3528
* @param crc register containing existing CRC (32-bit)
3529
* @param buf register pointing to input byte buffer (byte*)
3530
* @param len register containing number of bytes
3531
* @param constants register pointing to CRC table for 128-bit aligned memory
3532
* @param barretConstants register pointing to table for barrett reduction
3533
* @param t0 volatile register
3534
* @param t1 volatile register
3535
* @param t2 volatile register
3536
*/
3537
void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
3538
Register constants, Register barretConstants, Register t0, Register t1, Register t2) {
3539
Label L_mainLoop, L_tail, L_alignTail, L_barrett_reduction, L_end, L_first_warm_up_done, L_first_cool_down, L_second_cool_down, L_XOR, L_test;
3540
Label L_lv0, L_lv1, L_lv2, L_lv3, L_lv4, L_lv5, L_lv6, L_lv7, L_lv8, L_lv9, L_lv10, L_lv11, L_lv12, L_lv13, L_lv14, L_lv15;
3541
Label L_1, L_2, L_3, L_4;
3542
3543
Register rLoaded = t0;
3544
Register rTmp1 = t1;
3545
Register rTmp2 = t2;
3546
Register off16 = R22;
3547
Register off32 = R23;
3548
Register off48 = R24;
3549
Register off64 = R25;
3550
Register off80 = R26;
3551
Register off96 = R27;
3552
Register off112 = R28;
3553
Register rIdx = R29;
3554
Register rMax = R30;
3555
Register constantsPos = R31;
3556
3557
VectorRegister mask_32bit = VR24;
3558
VectorRegister mask_64bit = VR25;
3559
VectorRegister zeroes = VR26;
3560
VectorRegister const1 = VR27;
3561
VectorRegister const2 = VR28;
3562
3563
// Save non-volatile vector registers (frameless).
3564
Register offset = t1; int offsetInt = 0;
3565
offsetInt -= 16; li(offset, -16); stvx(VR20, offset, R1_SP);
3566
offsetInt -= 16; addi(offset, offset, -16); stvx(VR21, offset, R1_SP);
3567
offsetInt -= 16; addi(offset, offset, -16); stvx(VR22, offset, R1_SP);
3568
offsetInt -= 16; addi(offset, offset, -16); stvx(VR23, offset, R1_SP);
3569
offsetInt -= 16; addi(offset, offset, -16); stvx(VR24, offset, R1_SP);
3570
offsetInt -= 16; addi(offset, offset, -16); stvx(VR25, offset, R1_SP);
3571
offsetInt -= 16; addi(offset, offset, -16); stvx(VR26, offset, R1_SP);
3572
offsetInt -= 16; addi(offset, offset, -16); stvx(VR27, offset, R1_SP);
3573
offsetInt -= 16; addi(offset, offset, -16); stvx(VR28, offset, R1_SP);
3574
offsetInt -= 8; std(R22, offsetInt, R1_SP);
3575
offsetInt -= 8; std(R23, offsetInt, R1_SP);
3576
offsetInt -= 8; std(R24, offsetInt, R1_SP);
3577
offsetInt -= 8; std(R25, offsetInt, R1_SP);
3578
offsetInt -= 8; std(R26, offsetInt, R1_SP);
3579
offsetInt -= 8; std(R27, offsetInt, R1_SP);
3580
offsetInt -= 8; std(R28, offsetInt, R1_SP);
3581
offsetInt -= 8; std(R29, offsetInt, R1_SP);
3582
offsetInt -= 8; std(R30, offsetInt, R1_SP);
3583
offsetInt -= 8; std(R31, offsetInt, R1_SP);
3584
3585
// Set constants
3586
li(off16, 16);
3587
li(off32, 32);
3588
li(off48, 48);
3589
li(off64, 64);
3590
li(off80, 80);
3591
li(off96, 96);
3592
li(off112, 112);
3593
3594
clrldi(crc, crc, 32);
3595
3596
vxor(zeroes, zeroes, zeroes);
3597
vspltisw(VR0, -1);
3598
3599
vsldoi(mask_32bit, zeroes, VR0, 4);
3600
vsldoi(mask_64bit, zeroes, VR0, 8);
3601
3602
// Get the initial value into v8
3603
vxor(VR8, VR8, VR8);
3604
mtvrd(VR8, crc);
3605
vsldoi(VR8, zeroes, VR8, 8); // shift into bottom 32 bits
3606
3607
li (rLoaded, 0);
3608
3609
rldicr(rIdx, len, 0, 56);
3610
3611
{
3612
BIND(L_1);
3613
// Checksum in blocks of MAX_SIZE (32768)
3614
lis(rMax, 0);
3615
ori(rMax, rMax, 32768);
3616
mr(rTmp2, rMax);
3617
cmpd(CCR0, rIdx, rMax);
3618
bgt(CCR0, L_2);
3619
mr(rMax, rIdx);
3620
3621
BIND(L_2);
3622
subf(rIdx, rMax, rIdx);
3623
3624
// our main loop does 128 bytes at a time
3625
srdi(rMax, rMax, 7);
3626
3627
/*
3628
* Work out the offset into the constants table to start at. Each
3629
* constant is 16 bytes, and it is used against 128 bytes of input
3630
* data - 128 / 16 = 8
3631
*/
3632
sldi(rTmp1, rMax, 4);
3633
srdi(rTmp2, rTmp2, 3);
3634
subf(rTmp1, rTmp1, rTmp2);
3635
3636
// We reduce our final 128 bytes in a separate step
3637
addi(rMax, rMax, -1);
3638
mtctr(rMax);
3639
3640
// Find the start of our constants
3641
add(constantsPos, constants, rTmp1);
3642
3643
// Zero VR0-VR7, which will contain our checksums.
3644
vxor(VR0, VR0, VR0);
3645
vxor(VR1, VR1, VR1);
3646
vxor(VR2, VR2, VR2);
3647
vxor(VR3, VR3, VR3);
3648
vxor(VR4, VR4, VR4);
3649
vxor(VR5, VR5, VR5);
3650
vxor(VR6, VR6, VR6);
3651
vxor(VR7, VR7, VR7);
3652
3653
lvx(const1, constantsPos);
3654
3655
/*
3656
* If we are looping back to consume more data we use the values
3657
* already in VR16-VR23.
3658
*/
3659
cmpdi(CCR0, rLoaded, 1);
3660
beq(CCR0, L_3);
3661
{
3662
3663
// First warm up pass
3664
lvx(VR16, buf);
3665
lvx(VR17, off16, buf);
3666
lvx(VR18, off32, buf);
3667
lvx(VR19, off48, buf);
3668
lvx(VR20, off64, buf);
3669
lvx(VR21, off80, buf);
3670
lvx(VR22, off96, buf);
3671
lvx(VR23, off112, buf);
3672
addi(buf, buf, 8*16);
3673
3674
// xor in initial value
3675
vxor(VR16, VR16, VR8);
3676
}
3677
3678
BIND(L_3);
3679
bdz(L_first_warm_up_done);
3680
3681
addi(constantsPos, constantsPos, 16);
3682
lvx(const2, constantsPos);
3683
3684
// Second warm up pass
3685
vpmsumd(VR8, VR16, const1);
3686
lvx(VR16, buf);
3687
3688
vpmsumd(VR9, VR17, const1);
3689
lvx(VR17, off16, buf);
3690
3691
vpmsumd(VR10, VR18, const1);
3692
lvx(VR18, off32, buf);
3693
3694
vpmsumd(VR11, VR19, const1);
3695
lvx(VR19, off48, buf);
3696
3697
vpmsumd(VR12, VR20, const1);
3698
lvx(VR20, off64, buf);
3699
3700
vpmsumd(VR13, VR21, const1);
3701
lvx(VR21, off80, buf);
3702
3703
vpmsumd(VR14, VR22, const1);
3704
lvx(VR22, off96, buf);
3705
3706
vpmsumd(VR15, VR23, const1);
3707
lvx(VR23, off112, buf);
3708
3709
addi(buf, buf, 8 * 16);
3710
3711
bdz(L_first_cool_down);
3712
3713
/*
3714
* main loop. We modulo schedule it such that it takes three iterations
3715
* to complete - first iteration load, second iteration vpmsum, third
3716
* iteration xor.
3717
*/
3718
{
3719
BIND(L_4);
3720
lvx(const1, constantsPos); addi(constantsPos, constantsPos, 16);
3721
3722
vxor(VR0, VR0, VR8);
3723
vpmsumd(VR8, VR16, const2);
3724
lvx(VR16, buf);
3725
3726
vxor(VR1, VR1, VR9);
3727
vpmsumd(VR9, VR17, const2);
3728
lvx(VR17, off16, buf);
3729
3730
vxor(VR2, VR2, VR10);
3731
vpmsumd(VR10, VR18, const2);
3732
lvx(VR18, off32, buf);
3733
3734
vxor(VR3, VR3, VR11);
3735
vpmsumd(VR11, VR19, const2);
3736
lvx(VR19, off48, buf);
3737
lvx(const2, constantsPos);
3738
3739
vxor(VR4, VR4, VR12);
3740
vpmsumd(VR12, VR20, const1);
3741
lvx(VR20, off64, buf);
3742
3743
vxor(VR5, VR5, VR13);
3744
vpmsumd(VR13, VR21, const1);
3745
lvx(VR21, off80, buf);
3746
3747
vxor(VR6, VR6, VR14);
3748
vpmsumd(VR14, VR22, const1);
3749
lvx(VR22, off96, buf);
3750
3751
vxor(VR7, VR7, VR15);
3752
vpmsumd(VR15, VR23, const1);
3753
lvx(VR23, off112, buf);
3754
3755
addi(buf, buf, 8 * 16);
3756
3757
bdnz(L_4);
3758
}
3759
3760
BIND(L_first_cool_down);
3761
3762
// First cool down pass
3763
lvx(const1, constantsPos);
3764
addi(constantsPos, constantsPos, 16);
3765
3766
vxor(VR0, VR0, VR8);
3767
vpmsumd(VR8, VR16, const1);
3768
3769
vxor(VR1, VR1, VR9);
3770
vpmsumd(VR9, VR17, const1);
3771
3772
vxor(VR2, VR2, VR10);
3773
vpmsumd(VR10, VR18, const1);
3774
3775
vxor(VR3, VR3, VR11);
3776
vpmsumd(VR11, VR19, const1);
3777
3778
vxor(VR4, VR4, VR12);
3779
vpmsumd(VR12, VR20, const1);
3780
3781
vxor(VR5, VR5, VR13);
3782
vpmsumd(VR13, VR21, const1);
3783
3784
vxor(VR6, VR6, VR14);
3785
vpmsumd(VR14, VR22, const1);
3786
3787
vxor(VR7, VR7, VR15);
3788
vpmsumd(VR15, VR23, const1);
3789
3790
BIND(L_second_cool_down);
3791
// Second cool down pass
3792
vxor(VR0, VR0, VR8);
3793
vxor(VR1, VR1, VR9);
3794
vxor(VR2, VR2, VR10);
3795
vxor(VR3, VR3, VR11);
3796
vxor(VR4, VR4, VR12);
3797
vxor(VR5, VR5, VR13);
3798
vxor(VR6, VR6, VR14);
3799
vxor(VR7, VR7, VR15);
3800
3801
/*
3802
* vpmsumd produces a 96 bit result in the least significant bits
3803
* of the register. Since we are bit reflected we have to shift it
3804
* left 32 bits so it occupies the least significant bits in the
3805
* bit reflected domain.
3806
*/
3807
vsldoi(VR0, VR0, zeroes, 4);
3808
vsldoi(VR1, VR1, zeroes, 4);
3809
vsldoi(VR2, VR2, zeroes, 4);
3810
vsldoi(VR3, VR3, zeroes, 4);
3811
vsldoi(VR4, VR4, zeroes, 4);
3812
vsldoi(VR5, VR5, zeroes, 4);
3813
vsldoi(VR6, VR6, zeroes, 4);
3814
vsldoi(VR7, VR7, zeroes, 4);
3815
3816
// xor with last 1024 bits
3817
lvx(VR8, buf);
3818
lvx(VR9, off16, buf);
3819
lvx(VR10, off32, buf);
3820
lvx(VR11, off48, buf);
3821
lvx(VR12, off64, buf);
3822
lvx(VR13, off80, buf);
3823
lvx(VR14, off96, buf);
3824
lvx(VR15, off112, buf);
3825
addi(buf, buf, 8 * 16);
3826
3827
vxor(VR16, VR0, VR8);
3828
vxor(VR17, VR1, VR9);
3829
vxor(VR18, VR2, VR10);
3830
vxor(VR19, VR3, VR11);
3831
vxor(VR20, VR4, VR12);
3832
vxor(VR21, VR5, VR13);
3833
vxor(VR22, VR6, VR14);
3834
vxor(VR23, VR7, VR15);
3835
3836
li(rLoaded, 1);
3837
cmpdi(CCR0, rIdx, 0);
3838
addi(rIdx, rIdx, 128);
3839
bne(CCR0, L_1);
3840
}
3841
3842
// Work out how many bytes we have left
3843
andi_(len, len, 127);
3844
3845
// Calculate where in the constant table we need to start
3846
subfic(rTmp1, len, 128);
3847
add(constantsPos, constantsPos, rTmp1);
3848
3849
// How many 16 byte chunks are in the tail
3850
srdi(rIdx, len, 4);
3851
mtctr(rIdx);
3852
3853
/*
3854
* Reduce the previously calculated 1024 bits to 64 bits, shifting
3855
* 32 bits to include the trailing 32 bits of zeros
3856
*/
3857
lvx(VR0, constantsPos);
3858
lvx(VR1, off16, constantsPos);
3859
lvx(VR2, off32, constantsPos);
3860
lvx(VR3, off48, constantsPos);
3861
lvx(VR4, off64, constantsPos);
3862
lvx(VR5, off80, constantsPos);
3863
lvx(VR6, off96, constantsPos);
3864
lvx(VR7, off112, constantsPos);
3865
addi(constantsPos, constantsPos, 8 * 16);
3866
3867
vpmsumw(VR0, VR16, VR0);
3868
vpmsumw(VR1, VR17, VR1);
3869
vpmsumw(VR2, VR18, VR2);
3870
vpmsumw(VR3, VR19, VR3);
3871
vpmsumw(VR4, VR20, VR4);
3872
vpmsumw(VR5, VR21, VR5);
3873
vpmsumw(VR6, VR22, VR6);
3874
vpmsumw(VR7, VR23, VR7);
3875
3876
// Now reduce the tail (0 - 112 bytes)
3877
cmpdi(CCR0, rIdx, 0);
3878
beq(CCR0, L_XOR);
3879
3880
lvx(VR16, buf); addi(buf, buf, 16);
3881
lvx(VR17, constantsPos);
3882
vpmsumw(VR16, VR16, VR17);
3883
vxor(VR0, VR0, VR16);
3884
beq(CCR0, L_XOR);
3885
3886
lvx(VR16, buf); addi(buf, buf, 16);
3887
lvx(VR17, off16, constantsPos);
3888
vpmsumw(VR16, VR16, VR17);
3889
vxor(VR0, VR0, VR16);
3890
beq(CCR0, L_XOR);
3891
3892
lvx(VR16, buf); addi(buf, buf, 16);
3893
lvx(VR17, off32, constantsPos);
3894
vpmsumw(VR16, VR16, VR17);
3895
vxor(VR0, VR0, VR16);
3896
beq(CCR0, L_XOR);
3897
3898
lvx(VR16, buf); addi(buf, buf, 16);
3899
lvx(VR17, off48,constantsPos);
3900
vpmsumw(VR16, VR16, VR17);
3901
vxor(VR0, VR0, VR16);
3902
beq(CCR0, L_XOR);
3903
3904
lvx(VR16, buf); addi(buf, buf, 16);
3905
lvx(VR17, off64, constantsPos);
3906
vpmsumw(VR16, VR16, VR17);
3907
vxor(VR0, VR0, VR16);
3908
beq(CCR0, L_XOR);
3909
3910
lvx(VR16, buf); addi(buf, buf, 16);
3911
lvx(VR17, off80, constantsPos);
3912
vpmsumw(VR16, VR16, VR17);
3913
vxor(VR0, VR0, VR16);
3914
beq(CCR0, L_XOR);
3915
3916
lvx(VR16, buf); addi(buf, buf, 16);
3917
lvx(VR17, off96, constantsPos);
3918
vpmsumw(VR16, VR16, VR17);
3919
vxor(VR0, VR0, VR16);
3920
3921
// Now xor all the parallel chunks together
3922
BIND(L_XOR);
3923
vxor(VR0, VR0, VR1);
3924
vxor(VR2, VR2, VR3);
3925
vxor(VR4, VR4, VR5);
3926
vxor(VR6, VR6, VR7);
3927
3928
vxor(VR0, VR0, VR2);
3929
vxor(VR4, VR4, VR6);
3930
3931
vxor(VR0, VR0, VR4);
3932
3933
b(L_barrett_reduction);
3934
3935
BIND(L_first_warm_up_done);
3936
lvx(const1, constantsPos);
3937
addi(constantsPos, constantsPos, 16);
3938
vpmsumd(VR8, VR16, const1);
3939
vpmsumd(VR9, VR17, const1);
3940
vpmsumd(VR10, VR18, const1);
3941
vpmsumd(VR11, VR19, const1);
3942
vpmsumd(VR12, VR20, const1);
3943
vpmsumd(VR13, VR21, const1);
3944
vpmsumd(VR14, VR22, const1);
3945
vpmsumd(VR15, VR23, const1);
3946
b(L_second_cool_down);
3947
3948
BIND(L_barrett_reduction);
3949
3950
lvx(const1, barretConstants);
3951
addi(barretConstants, barretConstants, 16);
3952
lvx(const2, barretConstants);
3953
3954
vsldoi(VR1, VR0, VR0, 8);
3955
vxor(VR0, VR0, VR1); // xor two 64 bit results together
3956
3957
// shift left one bit
3958
vspltisb(VR1, 1);
3959
vsl(VR0, VR0, VR1);
3960
3961
vand(VR0, VR0, mask_64bit);
3962
3963
/*
3964
* The reflected version of Barrett reduction. Instead of bit
3965
* reflecting our data (which is expensive to do), we bit reflect our
3966
* constants and our algorithm, which means the intermediate data in
3967
* our vector registers goes from 0-63 instead of 63-0. We can reflect
3968
* the algorithm because we don't carry in mod 2 arithmetic.
3969
*/
3970
vand(VR1, VR0, mask_32bit); // bottom 32 bits of a
3971
vpmsumd(VR1, VR1, const1); // ma
3972
vand(VR1, VR1, mask_32bit); // bottom 32bits of ma
3973
vpmsumd(VR1, VR1, const2); // qn
3974
vxor(VR0, VR0, VR1); // a - qn, subtraction is xor in GF(2)
3975
3976
/*
3977
* Since we are bit reflected, the result (ie the low 32 bits) is in
3978
* the high 32 bits. We just need to shift it left 4 bytes
3979
* V0 [ 0 1 X 3 ]
3980
* V0 [ 0 X 2 3 ]
3981
*/
3982
vsldoi(VR0, VR0, zeroes, 4); // Shift result into top 64 bits of VR0.
3983
3984
// Get it into r3
3985
mfvrd(crc, VR0);
3986
3987
BIND(L_end);
3988
3989
offsetInt = 0;
3990
// Restore non-volatile Vector registers (frameless).
3991
offsetInt -= 16; li(offset, -16); lvx(VR20, offset, R1_SP);
3992
offsetInt -= 16; addi(offset, offset, -16); lvx(VR21, offset, R1_SP);
3993
offsetInt -= 16; addi(offset, offset, -16); lvx(VR22, offset, R1_SP);
3994
offsetInt -= 16; addi(offset, offset, -16); lvx(VR23, offset, R1_SP);
3995
offsetInt -= 16; addi(offset, offset, -16); lvx(VR24, offset, R1_SP);
3996
offsetInt -= 16; addi(offset, offset, -16); lvx(VR25, offset, R1_SP);
3997
offsetInt -= 16; addi(offset, offset, -16); lvx(VR26, offset, R1_SP);
3998
offsetInt -= 16; addi(offset, offset, -16); lvx(VR27, offset, R1_SP);
3999
offsetInt -= 16; addi(offset, offset, -16); lvx(VR28, offset, R1_SP);
4000
offsetInt -= 8; ld(R22, offsetInt, R1_SP);
4001
offsetInt -= 8; ld(R23, offsetInt, R1_SP);
4002
offsetInt -= 8; ld(R24, offsetInt, R1_SP);
4003
offsetInt -= 8; ld(R25, offsetInt, R1_SP);
4004
offsetInt -= 8; ld(R26, offsetInt, R1_SP);
4005
offsetInt -= 8; ld(R27, offsetInt, R1_SP);
4006
offsetInt -= 8; ld(R28, offsetInt, R1_SP);
4007
offsetInt -= 8; ld(R29, offsetInt, R1_SP);
4008
offsetInt -= 8; ld(R30, offsetInt, R1_SP);
4009
offsetInt -= 8; ld(R31, offsetInt, R1_SP);
4010
}
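// Illustrative sketch (not part of the original source): the reflected Barrett
// reduction performed at the end above, in pseudo-C over GF(2). clmul64() is a
// placeholder for a 64x64 carry-less multiply (what vpmsumd does per
// doubleword), truncated to 64 bits for illustration; const1/const2 are the
// two Barrett constants loaded from barretConstants.
//
//   uint64_t ma  = clmul64(a & 0xffffffff, const1);   // bottom 32 bits of a, times const1
//   uint64_t qn  = clmul64(ma & 0xffffffff, const2);  // bottom 32 bits of ma, times const2
//   uint64_t res = a ^ qn;                            // a - qn; subtraction is xor in GF(2)
//   uint32_t crc = (uint32_t) (res >> 32);            // bit reflected: result sits in the high word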
4011
4012
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
4013
assert_different_registers(crc, buf, /* len, not used!! */ table, tmp);
4014
4015
BLOCK_COMMENT("kernel_crc32_singleByte:");
4016
nand(crc, crc, crc); // ~c
4017
4018
lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
4019
update_byte_crc32(crc, tmp, table);
4020
4021
nand(crc, crc, crc); // ~c
4022
}
4023
4024
4025
void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
4026
#ifdef ASSERT
4027
Label ok;
4028
if (check_equal) {
4029
beq(CCR0, ok);
4030
} else {
4031
bne(CCR0, ok);
4032
}
4033
stop(msg, id);
4034
bind(ok);
4035
#endif
4036
}
4037
4038
void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
4039
Register mem_base, const char* msg, int id) {
4040
#ifdef ASSERT
4041
switch (size) {
4042
case 4:
4043
lwz(R0, mem_offset, mem_base);
4044
cmpwi(CCR0, R0, 0);
4045
break;
4046
case 8:
4047
ld(R0, mem_offset, mem_base);
4048
cmpdi(CCR0, R0, 0);
4049
break;
4050
default:
4051
ShouldNotReachHere();
4052
}
4053
asm_assert(check_equal, msg, id);
4054
#endif // ASSERT
4055
}
4056
4057
void MacroAssembler::verify_thread() {
4058
if (VerifyThread) {
4059
unimplemented("'VerifyThread' currently not implemented on PPC");
4060
}
4061
}
4062
4063
// READ: oop. KILL: R0. Volatile floats perhaps.
4064
void MacroAssembler::verify_oop(Register oop, const char* msg) {
4065
if (!VerifyOops) {
4066
return;
4067
}
4068
4069
address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
4070
const Register tmp = R11; // Will be preserved.
4071
const int nbytes_save = 11*8; // Volatile gprs except R0.
4072
save_volatile_gprs(R1_SP, -nbytes_save); // except R0
4073
4074
if (oop == tmp) mr(R4_ARG2, oop);
4075
save_LR_CR(tmp); // save in old frame
4076
push_frame_reg_args(nbytes_save, tmp);
4077
// load FunctionDescriptor** / entry_address *
4078
load_const_optimized(tmp, fd, R0);
4079
// load FunctionDescriptor* / entry_address
4080
ld(tmp, 0, tmp);
4081
if (oop != tmp) mr_if_needed(R4_ARG2, oop);
4082
load_const_optimized(R3_ARG1, (address)msg, R0);
4083
// Call destination for its side effect.
4084
call_c(tmp);
4085
4086
pop_frame();
4087
restore_LR_CR(tmp);
4088
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
4089
}
4090
4091
const char* stop_types[] = {
4092
"stop",
4093
"untested",
4094
"unimplemented",
4095
"shouldnotreachhere"
4096
};
4097
4098
static void stop_on_request(int tp, const char* msg) {
4099
tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
4100
guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
4101
}
4102
4103
// Call a C-function that prints output.
4104
void MacroAssembler::stop(int type, const char* msg, int id) {
4105
#ifndef PRODUCT
4106
block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
4107
#else
4108
block_comment("stop {");
4109
#endif
4110
4111
// setup arguments
4112
load_const_optimized(R3_ARG1, type);
4113
load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
4114
call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
4115
illtrap();
4116
emit_int32(id);
4117
block_comment("} stop;");
4118
}
4119
4120
#ifndef PRODUCT
4121
// Write pattern 0x0101010101010101 in memory region [low-before, high+after].
4122
// Val, addr are temp registers.
4123
// If low == addr, addr is killed.
4124
// High is preserved.
4125
void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
4126
if (!ZapMemory) return;
4127
4128
assert_different_registers(low, val);
4129
4130
BLOCK_COMMENT("zap memory region {");
4131
load_const_optimized(val, 0x0101010101010101);
4132
int size = before + after;
4133
if (low == high && size < 5 && size > 0) {
4134
int offset = -before*BytesPerWord;
4135
for (int i = 0; i < size; ++i) {
4136
std(val, offset, low);
4137
offset += (1*BytesPerWord);
4138
}
4139
} else {
4140
addi(addr, low, -before*BytesPerWord);
4141
assert_different_registers(high, val);
4142
if (after) addi(high, high, after * BytesPerWord);
4143
Label loop;
4144
bind(loop);
4145
std(val, 0, addr);
4146
addi(addr, addr, 8);
4147
cmpd(CCR6, addr, high);
4148
ble(CCR6, loop);
4149
if (after) addi(high, high, -after * BytesPerWord); // Correct back to old value.
4150
}
4151
BLOCK_COMMENT("} zap memory region");
4152
}
4153
4154
#endif // !PRODUCT
4155
4156
SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
4157
int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
4158
assert(sizeof(bool) == 1, "PowerPC ABI");
4159
masm->lbz(temp, simm16_offset, temp);
4160
masm->cmpwi(CCR0, temp, 0);
4161
masm->beq(CCR0, _label);
4162
}
4163
4164
SkipIfEqualZero::~SkipIfEqualZero() {
4165
_masm->bind(_label);
4166
}
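// Illustrative usage sketch (not part of the original source): code emitted
// inside the scope below is skipped at run time when the watched flag is zero.
// 'SomeDiagnosticFlag' is a hypothetical bool.
//
//   {
//     SkipIfEqualZero skip(masm, temp_reg, &SomeDiagnosticFlag);
//     // ... code generated here only executes when SomeDiagnosticFlag != 0 ...
//   } // destructor binds the skip-target label here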
4167
4168