GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/opto/library_call.cpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"

class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 public:
 private:
  bool             _is_virtual;
  bool             _does_virtual_dispatch;
  int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  int8_t           _last_predicate;    // Last generated predicate
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _does_virtual_dispatch(does_virtual_dispatch),
      _predicates_count((int8_t)predicates_count),
      _last_predicate((int8_t)-1),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual() const { return _is_virtual; }
  virtual bool is_predicated() const { return _predicates_count > 0; }
  virtual int predicates_count() const { return _predicates_count; }
  virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms);
  virtual Node* generate_predicate(JVMState* jvms, int predicate);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
  Node*             _result;        // the result node, if any
  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted

  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);

 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    // Check if this is a root compile. In that case we don't have a caller.
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      // Find out how many arguments the interpreter needs when deoptimizing
      // and save the stack pointer value so it can be used by uncommon_trap.
      // We find the argument count by looking at the declared signature.
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }
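  // Illustrative example (hypothetical call site, not taken from this file):
  // at an invokevirtual of String.charAt(int), the declared signature yields
  // nargs == 2 (the receiver plus one int argument), so _reexecute_sp re-adds
  // exactly the two stack slots the caller popped before entering the intrinsic.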

  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }

  ciMethod*         caller()    const { return jvms()->method(); }
  int               bci()       const { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const { return _intrinsic->method(); }

  bool  try_to_inline(int predicate);
  Node* try_to_predicate(int predicate);

  void push_result() {
    // Push the result onto the stack.
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }

 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
  }

  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void  set_result(RegionNode* region, PhiNode* value);
  Node* result() { return _result; }

  virtual int reexecute_sp() { return _reexecute_sp; }

  // Helper functions to inline natives
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node* *pos_index = NULL);
  Node* generate_nonpositive_guard(Node* index, bool never_negative,
                                   // resulting CastII of index:
                                   Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  Node* generate_current_thread(Node* &tls_output);
  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
                              bool disjoint_bases, const char* &name, bool dest_uninitialized);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static);

  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2);
  Node* make_string_method_node(int opcode, Node* str1, Node* str2);
  bool inline_string_compareTo();
  bool inline_string_indexOf();
  Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
  bool inline_string_equals();
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_trig(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_exp();
  bool inline_pow();
  Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_min_max(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node* &base, Node* &offset);
  Node* make_unsafe_address(Node* base, Node* offset);
  // Helper for inline_unsafe_access.
  // Generates the guards that check whether the result of
  // Unsafe.getObject should be recorded in an SATB log buffer.
  void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
#ifdef JFR_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_getEventWriter();
#endif
  bool inline_native_time_funcs(address method, const char* funcName);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();

  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  void generate_arraycopy(const TypePtr* adr_type,
                          BasicType basic_elem_type,
                          Node* src, Node* src_offset,
                          Node* dest, Node* dest_offset,
                          Node* copy_length,
                          bool disjoint_bases = false,
                          bool length_never_negative = false,
                          RegionNode* slow_region = NULL);
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  void generate_clear_array(const TypePtr* adr_type,
                            Node* dest,
                            BasicType basic_elem_type,
                            Node* slice_off,
                            Node* slice_len,
                            Node* slice_end);
  bool generate_block_arraycopy(const TypePtr* adr_type,
                                BasicType basic_elem_type,
                                AllocateNode* alloc,
                                Node* src, Node* src_offset,
                                Node* dest, Node* dest_offset,
                                Node* dest_size, bool dest_uninitialized);
  void generate_slow_arraycopy(const TypePtr* adr_type,
                               Node* src, Node* src_offset,
                               Node* dest, Node* dest_offset,
                               Node* copy_length, bool dest_uninitialized);
  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
                                     Node* dest_elem_klass,
                                     Node* src, Node* src_offset,
                                     Node* dest, Node* dest_offset,
                                     Node* copy_length, bool dest_uninitialized);
  Node* generate_generic_arraycopy(const TypePtr* adr_type,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length, bool dest_uninitialized);
  void generate_unchecked_arraycopy(const TypePtr* adr_type,
                                    BasicType basic_elem_type,
                                    bool disjoint_bases,
                                    Node* src, Node* src_offset,
                                    Node* dest, Node* dest_offset,
                                    Node* copy_length, bool dest_uninitialized);
  typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
  bool inline_unsafe_ordered_store(BasicType type);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_ghash_processBlocks();
  bool inline_sha_implCompress(vmIntrinsics::ID id);
  bool inline_digestBase_implCompressMB(int predicate);
  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
                                 bool long_state, address stubAddr, const char *stubName,
                                 Node* src_start, Node* ofs, Node* limit);
  Node* get_state_from_sha_object(Node *sha_object);
  Node* get_state_from_sha5_object(Node *sha_object);
  Node* inline_digestBase_implCompressMB_predicate(int predicate);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  bool inline_multiplyToLen();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();

  bool inline_profileBoolean();
};


//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  ccstr disable_intr = NULL;

  if ((DisableIntrinsic[0] != '\0'
       && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
      (method_has_option_value("DisableIntrinsic", disable_intr)
       && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
    // disabled by a user request on the command line:
    // example: -XX:DisableIntrinsic=_hashCode,_getClass
    return NULL;
  }

  if (!m->is_loaded()) {
    // do not attempt to inline unloaded methods
    return NULL;
  }

  // Only a few intrinsics implement a virtual dispatch.
  // They are expensive calls which are also frequently overridden.
  if (is_virtual) {
    switch (id) {
    case vmIntrinsics::_hashCode:
    case vmIntrinsics::_clone:
      // OK, Object.hashCode and Object.clone intrinsics come in both flavors
      break;
    default:
      return NULL;
    }
  }

  // -XX:-InlineNatives disables nearly all intrinsics:
  if (!InlineNatives) {
    switch (id) {
    case vmIntrinsics::_indexOf:
    case vmIntrinsics::_compareTo:
    case vmIntrinsics::_equals:
    case vmIntrinsics::_equalsC:
    case vmIntrinsics::_getAndAddInt:
    case vmIntrinsics::_getAndAddLong:
    case vmIntrinsics::_getAndSetInt:
    case vmIntrinsics::_getAndSetLong:
    case vmIntrinsics::_getAndSetObject:
    case vmIntrinsics::_loadFence:
    case vmIntrinsics::_storeFence:
    case vmIntrinsics::_fullFence:
      break;  // InlineNatives does not control String.compareTo
    case vmIntrinsics::_Reference_get:
      break;  // InlineNatives does not control Reference.get
    default:
      return NULL;
    }
  }

  int predicates = 0;
  bool does_virtual_dispatch = false;

  switch (id) {
  case vmIntrinsics::_compareTo:
    if (!SpecialStringCompareTo) return NULL;
    if (!Matcher::match_rule_supported(Op_StrComp)) return NULL;
    break;
  case vmIntrinsics::_indexOf:
    if (!SpecialStringIndexOf) return NULL;
    break;
  case vmIntrinsics::_equals:
    if (!SpecialStringEquals) return NULL;
    if (!Matcher::match_rule_supported(Op_StrEquals)) return NULL;
    break;
  case vmIntrinsics::_equalsC:
    if (!SpecialArraysEquals) return NULL;
    if (!Matcher::match_rule_supported(Op_AryEq)) return NULL;
    break;
  case vmIntrinsics::_arraycopy:
    if (!InlineArrayCopy) return NULL;
    break;
  case vmIntrinsics::_copyMemory:
    if (StubRoutines::unsafe_arraycopy() == NULL) return NULL;
    if (!InlineArrayCopy) return NULL;
    break;
  case vmIntrinsics::_hashCode:
    if (!InlineObjectHash) return NULL;
    does_virtual_dispatch = true;
    break;
  case vmIntrinsics::_clone:
    does_virtual_dispatch = true;
    // fall through
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
    if (!InlineObjectCopy) return NULL;
    // These also use the arraycopy intrinsic mechanism:
    if (!InlineArrayCopy) return NULL;
    break;
  case vmIntrinsics::_encodeISOArray:
    if (!SpecialEncodeISOArray) return NULL;
    if (!Matcher::match_rule_supported(Op_EncodeISOArray)) return NULL;
    break;
  case vmIntrinsics::_checkIndex:
    // We do not intrinsify this. The optimizer does fine with it.
    return NULL;

  case vmIntrinsics::_getCallerClass:
    if (!UseNewReflection) return NULL;
    if (!InlineReflectionGetCallerClass) return NULL;
    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) return NULL;
    break;

  case vmIntrinsics::_bitCount_i:
    if (!Matcher::match_rule_supported(Op_PopCountI)) return NULL;
    break;

  case vmIntrinsics::_bitCount_l:
    if (!Matcher::match_rule_supported(Op_PopCountL)) return NULL;
    break;

  case vmIntrinsics::_numberOfLeadingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return NULL;
    break;

  case vmIntrinsics::_numberOfLeadingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
    break;

  case vmIntrinsics::_numberOfTrailingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
    break;

  case vmIntrinsics::_numberOfTrailingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
    break;

  case vmIntrinsics::_reverseBytes_c:
    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
    break;
  case vmIntrinsics::_reverseBytes_s:
    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return NULL;
    break;
  case vmIntrinsics::_reverseBytes_i:
    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return NULL;
    break;
  case vmIntrinsics::_reverseBytes_l:
    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return NULL;
    break;

  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also add a memory barrier to prevent commoning reads from this field
    // across safepoints, since GC can change its value.
    break;

  case vmIntrinsics::_compareAndSwapObject:
#ifdef _LP64
    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
#endif
    break;

  case vmIntrinsics::_compareAndSwapLong:
    if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
    break;

  case vmIntrinsics::_getAndAddInt:
    if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
    break;

  case vmIntrinsics::_getAndAddLong:
    if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
    break;

  case vmIntrinsics::_getAndSetInt:
    if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
    break;

  case vmIntrinsics::_getAndSetLong:
    if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
    break;

  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
    if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
    break;
#else
    if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
    break;
#endif

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:
    if (!UseAESIntrinsics) return NULL;
    break;

  case vmIntrinsics::_multiplyToLen:
    if (!UseMultiplyToLenIntrinsic) return NULL;
    break;

  case vmIntrinsics::_squareToLen:
    if (!UseSquareToLenIntrinsic) return NULL;
    break;

  case vmIntrinsics::_mulAdd:
    if (!UseMulAddIntrinsic) return NULL;
    break;

  case vmIntrinsics::_montgomeryMultiply:
    if (!UseMontgomeryMultiplyIntrinsic) return NULL;
    break;
  case vmIntrinsics::_montgomerySquare:
    if (!UseMontgomerySquareIntrinsic) return NULL;
    break;

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    if (!UseAESIntrinsics) return NULL;
    // these two require the predicated logic
    predicates = 1;
    break;

  case vmIntrinsics::_sha_implCompress:
    if (!UseSHA1Intrinsics) return NULL;
    break;

  case vmIntrinsics::_sha2_implCompress:
    if (!UseSHA256Intrinsics) return NULL;
    break;

  case vmIntrinsics::_sha5_implCompress:
    if (!UseSHA512Intrinsics) return NULL;
    break;

  case vmIntrinsics::_digestBase_implCompressMB:
    if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return NULL;
    predicates = 3;
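    // Three predicates, one per digest variant this intrinsic can dispatch
    // to at runtime (SHA-1, SHA-256, SHA-512, matching the three flags
    // tested above); see inline_digestBase_implCompressMB_predicate.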
    break;

  case vmIntrinsics::_ghash_processBlocks:
    if (!UseGHASHIntrinsics) return NULL;
    break;

  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    if (!UseCRC32Intrinsics) return NULL;
    break;

  case vmIntrinsics::_incrementExactI:
  case vmIntrinsics::_addExactI:
    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_incrementExactL:
  case vmIntrinsics::_addExactL:
    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_decrementExactI:
  case vmIntrinsics::_subtractExactI:
    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_decrementExactL:
  case vmIntrinsics::_subtractExactL:
    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_negateExactI:
    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_negateExactL:
    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyExactI:
    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyExactL:
    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
    break;

  default:
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    break;
  }

  // -XX:-InlineClassNatives disables natives from the Class class.
  // The flag applies to all reflective calls, notably Array.newArray
  // (visible to Java programmers as Array.newInstance).
  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
    if (!InlineClassNatives) return NULL;
  }

  // -XX:-InlineThreadNatives disables natives from the Thread class.
  if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
    if (!InlineThreadNatives) return NULL;
  }

  // -XX:-InlineMathNatives disables natives from the Math, Float and Double classes.
  if (m->holder()->name() == ciSymbol::java_lang_Math() ||
      m->holder()->name() == ciSymbol::java_lang_Float() ||
      m->holder()->name() == ciSymbol::java_lang_Double()) {
    if (!InlineMathNatives) return NULL;
  }

  // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
  if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
    if (!InlineUnsafeOps) return NULL;
  }

  return new LibraryIntrinsic(m, is_virtual, predicates, does_virtual_dispatch, (vmIntrinsics::ID) id);
}

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  // Try to inline the intrinsic.
  if (kit.try_to_inline(_last_predicate)) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      // Not a root compile.
      const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    } else {
      // Root compile
      tty->print("Did not generate intrinsic %s%s at bci:%d in",
                 vmIntrinsics::name_at(intrinsic_id()),
                 (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl;  // Could be NULL if the check folds.
  }

  // The intrinsic bailed out
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      // Not a root compile.
      const char* msg = "failed to generate predicate for intrinsic";
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    } else {
      // Root compile
      C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
                                        vmIntrinsics::name_at(intrinsic_id()),
                                        (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store      = true;
  const bool is_native_ptr = true;
  const bool is_static     = true;
  const bool is_volatile   = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");


  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:          return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:  return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass:          return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:              return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:               return inline_min_max(intrinsic_id());

  case vmIntrinsics::_addExactI:         return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:         return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:   return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:   return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:   return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:   return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:    return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:    return inline_math_multiplyExactL();
  case vmIntrinsics::_negateExactI:      return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:      return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:    return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:    return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy:         return inline_arraycopy();

  case vmIntrinsics::_compareTo:         return inline_string_compareTo();
  case vmIntrinsics::_indexOf:           return inline_string_indexOf();
  case vmIntrinsics::_equals:            return inline_string_equals();

  case vmIntrinsics::_getObject:          return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile, false);
  case vmIntrinsics::_getBoolean:         return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
  case vmIntrinsics::_getByte:            return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_getShort:           return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_getChar:            return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_getInt:             return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_getLong:            return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_getFloat:           return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_getDouble:          return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);

  case vmIntrinsics::_putObject:          return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile, false);
  case vmIntrinsics::_putBoolean:         return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile, false);
  case vmIntrinsics::_putByte:            return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_putShort:           return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_putChar:            return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_putInt:             return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_putLong:            return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_putFloat:           return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_putDouble:          return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);

  case vmIntrinsics::_getByte_raw:        return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_getShort_raw:       return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_getChar_raw:        return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_getInt_raw:         return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_getLong_raw:        return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_getFloat_raw:       return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_getDouble_raw:      return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
  case vmIntrinsics::_getAddress_raw:     return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);

  case vmIntrinsics::_putByte_raw:        return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_putShort_raw:       return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_putChar_raw:        return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_putInt_raw:         return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_putLong_raw:        return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_putFloat_raw:       return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_putDouble_raw:      return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
  case vmIntrinsics::_putAddress_raw:     return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile, false);

  case vmIntrinsics::_getObjectVolatile:  return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile, false);
  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile, false);
  case vmIntrinsics::_getByteVolatile:    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile, false);
  case vmIntrinsics::_getShortVolatile:   return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile, false);
  case vmIntrinsics::_getCharVolatile:    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile, false);
  case vmIntrinsics::_getIntVolatile:     return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile, false);
  case vmIntrinsics::_getLongVolatile:    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile, false);
  case vmIntrinsics::_getFloatVolatile:   return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile, false);
  case vmIntrinsics::_getDoubleVolatile:  return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile, false);

  case vmIntrinsics::_putObjectVolatile:  return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile, false);
  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile, false);
  case vmIntrinsics::_putByteVolatile:    return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile, false);
  case vmIntrinsics::_putShortVolatile:   return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile, false);
  case vmIntrinsics::_putCharVolatile:    return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile, false);
  case vmIntrinsics::_putIntVolatile:     return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile, false);
  case vmIntrinsics::_putLongVolatile:    return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile, false);
  case vmIntrinsics::_putFloatVolatile:   return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile, false);
  case vmIntrinsics::_putDoubleVolatile:  return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile, false);

  case vmIntrinsics::_prefetchRead:        return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
  case vmIntrinsics::_prefetchWrite:       return inline_unsafe_prefetch(!is_native_ptr,  is_store, !is_static);
  case vmIntrinsics::_prefetchReadStatic:  return inline_unsafe_prefetch(!is_native_ptr, !is_store,  is_static);
  case vmIntrinsics::_prefetchWriteStatic: return inline_unsafe_prefetch(!is_native_ptr,  is_store,  is_static);

  case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapInt:    return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapLong:   return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);

  case vmIntrinsics::_putOrderedObject:     return inline_unsafe_ordered_store(T_OBJECT);
  case vmIntrinsics::_putOrderedInt:        return inline_unsafe_ordered_store(T_INT);
  case vmIntrinsics::_putOrderedLong:       return inline_unsafe_ordered_store(T_LONG);

  case vmIntrinsics::_getAndAddInt:         return inline_unsafe_load_store(T_INT,    LS_xadd);
  case vmIntrinsics::_getAndAddLong:        return inline_unsafe_load_store(T_LONG,   LS_xadd);
  case vmIntrinsics::_getAndSetInt:         return inline_unsafe_load_store(T_INT,    LS_xchg);
  case vmIntrinsics::_getAndSetLong:        return inline_unsafe_load_store(T_LONG,   LS_xchg);
  case vmIntrinsics::_getAndSetObject:      return inline_unsafe_load_store(T_OBJECT, LS_xchg);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:            return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_currentThread:        return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted:        return inline_native_isInterrupted();

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:          return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
  case vmIntrinsics::_getClassId:           return inline_native_classID();
  case vmIntrinsics::_getEventWriter:       return inline_native_getEventWriter();
#endif
  case vmIntrinsics::_currentTimeMillis:    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:             return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance:     return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:           return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray:             return inline_native_newArray();
  case vmIntrinsics::_getLength:            return inline_native_getLength();
  case vmIntrinsics::_copyOf:               return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:          return inline_array_copyOf(true);
  case vmIntrinsics::_equalsC:              return inline_array_equals();
  case vmIntrinsics::_clone:                return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom:     return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getComponentType:
  case vmIntrinsics::_getClassAccessFlags:  return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:     return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:       return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass:       return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get:        return inline_reference_get();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
    return inline_sha_implCompress(intrinsic_id());

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();

  case vmIntrinsics::_encodeISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    Node* slow_ctl = control();
    set_control(top());  // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result( _gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
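//
// A minimal usage sketch (hypothetical caller; the names below are invented
// for illustration and do not come from this file):
//
//   RegionNode* slow_region = new (C) RegionNode(1);
//   Node* slow_ctl = generate_slow_guard(bol, slow_region);
//   // control() now sits on the fast path; emit the fast-path IR here.
//   // Any non-NULL slow control was appended as an extra edge on
//   // slow_region, which the caller transforms and merges afterwards.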
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS))  // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C) CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
                                                        Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS1))  // [1,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0)));
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq));
  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
  if (is_notp != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C) CastIINode(index, TypeInt::POS1);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_notp;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable.  Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
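//
// Worked example of Plan A's overflow behavior (illustrative numbers, not
// taken from any caller): with 32-bit ints, offset = 0x7FFFFFF0 and
// copyLength = 0x20 sum to 0x80000010, which is negative as a signed int.
// Under Plan A's unsigned comparison that wrapped sum reads as a value
// larger than any legal array length (always < 2^31), so the guard still
// rejects the access instead of mistaking the overflow for a small index.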
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform(new (C) AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}


//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new (C) ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
  tls_output = thread;
  return threadObj;
}


//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions.  This version is called
// with str1 and str2 pointing to String object nodes.
//
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
  Node* no_ctrl = NULL;

  // Get start addr of string
  Node* str1_value  = load_String_value(no_ctrl, str1);
  Node* str1_offset = load_String_offset(no_ctrl, str1);
  Node* str1_start  = array_element_address(str1_value, str1_offset, T_CHAR);

  // Get length of string 1
  Node* str1_len = load_String_length(no_ctrl, str1);

  Node* str2_value  = load_String_value(no_ctrl, str2);
  Node* str2_offset = load_String_offset(no_ctrl, str2);
  Node* str2_start  = array_element_address(str2_value, str2_offset, T_CHAR);

  Node* str2_len = NULL;
  Node* result   = NULL;

  switch (opcode) {
  case Op_StrIndexOf:
    // Get length of string 2
    str2_len = load_String_length(no_ctrl, str2);

    result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
                                    str1_start, str1_len, str2_start, str2_len);
    break;
  case Op_StrComp:
    // Get length of string 2
    str2_len = load_String_length(no_ctrl, str2);

    result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, str1_len, str2_start, str2_len);
    break;
  case Op_StrEquals:
    result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                                   str1_start, str2_start, str1_len);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true);  // Has chance for split-if optimization

  return _gvn.transform(result);
}

// Helper method for String intrinsic functions.  This version is called
// with str1 and str2 pointing to char[] nodes, with cnt1 and cnt2 pointing
// to Int nodes containing the lengths of str1 and str2.
//
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
  Node* result = NULL;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
                                    str1_start, cnt1, str2_start, cnt2);
    break;
  case Op_StrComp:
    result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, cnt1, str2_start, cnt2);
    break;
  case Op_StrEquals:
    result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                                   str1_start, str2_start, cnt1);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true);  // Has chance for split-if optimization

  return _gvn.transform(result);
}
1246
1247
//------------------------------inline_string_compareTo------------------------
1248
// public int java.lang.String.compareTo(String anotherString);
1249
bool LibraryCallKit::inline_string_compareTo() {
1250
Node* receiver = null_check(argument(0));
1251
Node* arg = null_check(argument(1));
1252
if (stopped()) {
1253
return true;
1254
}
1255
set_result(make_string_method_node(Op_StrComp, receiver, arg));
1256
return true;
1257
}

//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals() {
  Node* receiver = null_check_receiver();
  // NOTE: Do not null check the argument for String.equals(); the spec
  // allows NULL to be passed as the argument.
  Node* argument = this->argument(1);
  if (stopped()) {
    return true;
  }

  // paths (plus control) merge
  RegionNode* region = new (C) RegionNode(5);
  Node* phi = new (C) PhiNode(region, TypeInt::BOOL);

  // does source == target string?
  Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
  Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));

  Node* if_eq = generate_slow_guard(bol, NULL);
  if (if_eq != NULL) {
    // receiver == argument
    phi->init_req(2, intcon(1));
    region->init_req(2, if_eq);
  }

  // get String klass for instanceOf
  ciInstanceKlass* klass = env()->String_klass();

  if (!stopped()) {
    Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
    Node* cmp = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
    Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));

    Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
    // instanceOf == true, fallthrough

    if (inst_false != NULL) {
      phi->init_req(3, intcon(0));
      region->init_req(3, inst_false);
    }
  }

  if (!stopped()) {
    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);

    // Properly cast the argument to String
    argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
    // This path is taken only when argument's type is String:NotNull.
    argument = cast_not_null(argument, false);

    Node* no_ctrl = NULL;

    // Get start addr of receiver
    Node* receiver_val    = load_String_value(no_ctrl, receiver);
    Node* receiver_offset = load_String_offset(no_ctrl, receiver);
    Node* receiver_start  = array_element_address(receiver_val, receiver_offset, T_CHAR);

    // Get length of receiver
    Node* receiver_cnt = load_String_length(no_ctrl, receiver);

    // Get start addr of argument
    Node* argument_val    = load_String_value(no_ctrl, argument);
    Node* argument_offset = load_String_offset(no_ctrl, argument);
    Node* argument_start  = array_element_address(argument_val, argument_offset, T_CHAR);

    // Get length of argument
    Node* argument_cnt = load_String_length(no_ctrl, argument);

    // Check for receiver count != argument count
    Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt));
    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne));
    Node* if_ne = generate_slow_guard(bol, NULL);
    if (if_ne != NULL) {
      phi->init_req(4, intcon(0));
      region->init_req(4, if_ne);
    }

    // Check for count == 0 is done by assembler code for StrEquals.

    if (!stopped()) {
      Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }

  // post merge
  set_control(_gvn.transform(region));
  record_for_igvn(region);

  set_result(_gvn.transform(phi));
  return true;
}

//------------------------------inline_array_equals----------------------------
bool LibraryCallKit::inline_array_equals() {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);
  set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
  return true;
}

// Java version of String.indexOf(constant string)
// class StringDecl {
//   StringDecl(char[] ca) {
//     offset = 0;
//     count = ca.length;
//     value = ca;
//   }
//   int offset;
//   int count;
//   char[] value;
// }
//
// static int string_indexOf_J(StringDecl string_object, char[] target_object,
//                             int targetOffset, int cache_i, int md2) {
//   int cache = cache_i;
//   int sourceOffset = string_object.offset;
//   int sourceCount = string_object.count;
//   int targetCount = target_object.length;
//
//   int targetCountLess1 = targetCount - 1;
//   int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
//
//   char[] source = string_object.value;
//   char[] target = target_object;
//   int lastChar = target[targetCountLess1];
//
//  outer_loop:
//   for (int i = sourceOffset; i < sourceEnd; ) {
//     int src = source[i + targetCountLess1];
//     if (src == lastChar) {
//       // With random strings and a 4-character alphabet,
//       // reverse matching at this point sets up 0.8% fewer
//       // frames, but (paradoxically) makes 0.3% more probes.
//       // Since those probes are nearer the lastChar probe,
//       // there may be a net D$ win with reverse matching.
//       // But, reversing the loop inhibits unroll of the inner loop
//       // for an unknown reason.  So does running the outer loop from
//       // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount)
//       for (int j = 0; j < targetCountLess1; j++) {
//         if (target[targetOffset + j] != source[i+j]) {
//           if ((cache & (1 << source[i+j])) == 0) {
//             if (md2 < j+1) {
//               i += j+1;
//               continue outer_loop;
//             }
//           }
//           i += md2;
//           continue outer_loop;
//         }
//       }
//       return i - sourceOffset;
//     }
//     if ((cache & (1 << src)) == 0) {
//       i += targetCountLess1;
//     } // using "i += targetCount;" and an "else i++;" causes a jump to jump.
//     i++;
//   }
//   return -1;
// }

//------------------------------string_indexOf------------------------
Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
                                     jint cache_i, jint md2_i) {

  Node* no_ctrl  = NULL;
  float likely   = PROB_LIKELY(0.9);
  float unlikely = PROB_UNLIKELY(0.9);

  const int nargs = 0; // no arguments to push back for uncommon trap in predicate

  Node* source        = load_String_value(no_ctrl, string_object);
  Node* sourceOffset  = load_String_offset(no_ctrl, string_object);
  Node* sourceCount   = load_String_length(no_ctrl, string_object);

  Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
  jint target_length = target_array->length();
  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
  const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);

  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    target = cast_array_to_stable(target, target_type);
  }

  IdealKit kit(this, false, true);
#define __ kit.
  Node* zero             = __ ConI(0);
  Node* one              = __ ConI(1);
  Node* cache            = __ ConI(cache_i);
  Node* md2              = __ ConI(md2_i);
  Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
  Node* targetCount      = __ ConI(target_length);
  Node* targetCountLess1 = __ ConI(target_length - 1);
  Node* targetOffset     = __ ConI(targetOffset_i);
  Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);

  IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
  Node* outer_loop = __ make_label(2 /* goto */);
  Node* return_    = __ make_label(1);

  __ set(rtn,__ ConI(-1));
  __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
       Node* i2  = __ AddI(__ value(i), targetCountLess1);
       // pin to prohibit loading of "next iteration" value which may SEGV (rare)
       Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
       __ if_then(src, BoolTest::eq, lastChar, unlikely); {
         __ loop(this, nargs, j, zero, BoolTest::lt, targetCountLess1); {
              Node* tpj = __ AddI(targetOffset, __ value(j));
              Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
              Node* ipj  = __ AddI(__ value(i), __ value(j));
              Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
              __ if_then(targ, BoolTest::ne, src2); {
                __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
                  __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
                    __ increment(i, __ AddI(__ value(j), one));
                    __ goto_(outer_loop);
                  } __ end_if(); __ dead(j);
                }__ end_if(); __ dead(j);
                __ increment(i, md2);
                __ goto_(outer_loop);
              }__ end_if();
              __ increment(j, one);
         }__ end_loop(); __ dead(j);
         __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
         __ goto_(return_);
       }__ end_if();
       __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
         __ increment(i, targetCountLess1);
       }__ end_if();
       __ increment(i, one);
       __ bind(outer_loop);
  }__ end_loop(); __ dead(i);
  __ bind(return_);

  // Final sync IdealKit and GraphKit.
  final_sync(kit);
  Node* result = __ value(rtn);
#undef __
  C->set_has_loops(true);
  return result;
}

//------------------------------inline_string_indexOf------------------------
bool LibraryCallKit::inline_string_indexOf() {
  Node* receiver = argument(0);
  Node* arg      = argument(1);

  Node* result;
  // Disable the use of pcmpestri until it can be guaranteed that
  // the load doesn't cross into the uncommitted space.
  if (Matcher::has_match_rule(Op_StrIndexOf) &&
      UseSSE42Intrinsics) {
    // Generate SSE4.2 version of indexOf
    // We currently only have match rules that use SSE4.2

    receiver = null_check(receiver);
    arg      = null_check(arg);
    if (stopped()) {
      return true;
    }

    ciInstanceKlass* str_klass = env()->String_klass();
    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass);

    // Make the merge point
    RegionNode* result_rgn = new (C) RegionNode(4);
    Node*       result_phi = new (C) PhiNode(result_rgn, TypeInt::INT);
    Node* no_ctrl  = NULL;

    // Get start addr of source string
    Node* source = load_String_value(no_ctrl, receiver);
    Node* source_offset = load_String_offset(no_ctrl, receiver);
    Node* source_start = array_element_address(source, source_offset, T_CHAR);

    // Get length of source string
    Node* source_cnt  = load_String_length(no_ctrl, receiver);

    // Get start addr of substring
    Node* substr = load_String_value(no_ctrl, arg);
    Node* substr_offset = load_String_offset(no_ctrl, arg);
    Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);

    // Get length of substring
    Node* substr_cnt  = load_String_length(no_ctrl, arg);

    // Check for substr count > string count
    Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt));
    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt));
    Node* if_gt = generate_slow_guard(bol, NULL);
    if (if_gt != NULL) {
      result_phi->init_req(2, intcon(-1));
      result_rgn->init_req(2, if_gt);
    }

    if (!stopped()) {
      // Check for substr count == 0
      cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0)));
      bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
      Node* if_zero = generate_slow_guard(bol, NULL);
      if (if_zero != NULL) {
        result_phi->init_req(3, intcon(0));
        result_rgn->init_req(3, if_zero);
      }
    }

    if (!stopped()) {
      result = make_string_method_node(Op_StrIndexOf, source_start, source_cnt, substr_start, substr_cnt);
      result_phi->init_req(1, result);
      result_rgn->init_req(1, control());
    }
    set_control(_gvn.transform(result_rgn));
    record_for_igvn(result_rgn);
    result = _gvn.transform(result_phi);

  } else { // Use LibraryCallKit::string_indexOf
    // don't intrinsify if argument isn't a constant string.
    if (!arg->is_Con()) {
      return false;
    }
    const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr();
    if (str_type == NULL) {
      return false;
    }
    ciInstanceKlass* klass = env()->String_klass();
    ciObject* str_const = str_type->const_oop();
    if (str_const == NULL || str_const->klass() != klass) {
      return false;
    }
    ciInstance* str = str_const->as_instance();
    assert(str != NULL, "must be instance");

    ciObject* v = str->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
    ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array

    int o;
    int c;
    if (java_lang_String::has_offset_field()) {
      o = str->field_value_by_offset(java_lang_String::offset_offset_in_bytes()).as_int();
      c = str->field_value_by_offset(java_lang_String::count_offset_in_bytes()).as_int();
    } else {
      o = 0;
      c = pat->length();
    }

    // Constant strings have no offset and count == length, which
    // simplifies the resulting code somewhat, so let's optimize for that.
    if (o != 0 || c != pat->length()) {
      return false;
    }

    receiver = null_check(receiver, T_OBJECT);
    // NOTE: No null check on the argument is needed since it's a constant String oop.
    if (stopped()) {
      return true;
    }

    // The null string as a pattern always returns 0 (match at beginning of string)
    if (c == 0) {
      set_result(intcon(0));
      return true;
    }

    // Generate default indexOf
    jchar lastChar = pat->char_at(o + (c - 1));
    int cache = 0;
    int i;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
    }

    int md2 = c;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      if (pat->char_at(o + i) == lastChar) {
        md2 = (c - 1) - i;
      }
    }
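
    // Worked example (illustrative, not from the original source): for the
    // constant pattern "abcab" (c == 5, lastChar == 'b'):
    //   cache: the bits ('a' & 31), ('b' & 31) and ('c' & 31) are set -- a
    //          Bloom-filter-style mask asking "can this char occur among the
    //          first c-1 pattern chars at all?"
    //   md2:   the rightmost 'b' before the last position is at i == 1, so
    //          md2 == (c - 1) - 1 == 3 -- the shift applied on a mismatch,
    //          analogous to the Boyer-Moore-Horspool bad-character shift.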

    result = string_indexOf(receiver, pat, o, cache, md2);
  }
  set_result(result);
  return true;
}

//--------------------------round_double_node--------------------------------
// Round a double node if necessary.
Node* LibraryCallKit::round_double_node(Node* n) {
  if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
    n = _gvn.transform(new (C) RoundDoubleNode(0, n));
  return n;
}

//------------------------------inline_math-----------------------------------
// public static double Math.abs(double)
// public static double Math.sqrt(double)
// public static double Math.log(double)
// public static double Math.log10(double)
bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
  Node* arg = round_double_node(argument(0));
  Node* n = NULL;
  switch (id) {
  case vmIntrinsics::_dabs:   n = new (C) AbsDNode(                arg);  break;
  case vmIntrinsics::_dsqrt:  n = new (C) SqrtDNode(C, control(),  arg);  break;
  case vmIntrinsics::_dlog:   n = new (C) LogDNode(C, control(),   arg);  break;
  case vmIntrinsics::_dlog10: n = new (C) Log10DNode(C, control(), arg);  break;
  default:  fatal_unexpected_iid(id);  break;
  }
  set_result(_gvn.transform(n));
  return true;
}

//------------------------------inline_trig----------------------------------
// Inline sin/cos/tan instructions, if possible.  If rounding is required, do
// argument reduction which will turn into a fast/slow diamond.
bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
  Node* arg = round_double_node(argument(0));
  Node* n = NULL;

  switch (id) {
  case vmIntrinsics::_dsin:  n = new (C) SinDNode(C, control(), arg);  break;
  case vmIntrinsics::_dcos:  n = new (C) CosDNode(C, control(), arg);  break;
  case vmIntrinsics::_dtan:  n = new (C) TanDNode(C, control(), arg);  break;
  default:  fatal_unexpected_iid(id);  break;
  }
  n = _gvn.transform(n);

  // Rounding required?  Check for argument reduction!
  if (Matcher::strict_fp_requires_explicit_rounding) {
    static const double     pi_4 =  0.7853981633974483;
    static const double neg_pi_4 = -0.7853981633974483;
    // pi/2 in 80-bit extended precision
    // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
    // -pi/2 in 80-bit extended precision
    // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
    // Cutoff value for using this argument reduction technique
    //static const double    pi_2_minus_epsilon =  1.564660403643354;
    //static const double neg_pi_2_plus_epsilon = -1.564660403643354;

    // Pseudocode for sin:
    // if (x <= Math.PI / 4.0) {
    //   if (x >= -Math.PI / 4.0) return  fsin(x);
    //   if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
    // } else {
    //   if (x <=  Math.PI / 2.0) return  fcos(x - Math.PI / 2.0);
    // }
    // return StrictMath.sin(x);

    // Pseudocode for cos:
    // if (x <= Math.PI / 4.0) {
    //   if (x >= -Math.PI / 4.0) return  fcos(x);
    //   if (x >= -Math.PI / 2.0) return  fsin(x + Math.PI / 2.0);
    // } else {
    //   if (x <=  Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
    // }
    // return StrictMath.cos(x);

    // Actually, sticking in an 80-bit Intel value into C2 will be tough; it
    // requires a special machine instruction to load it.  Instead we'll try
    // the 'easy' case.  If we really need the extra range +/- PI/2 we'll
    // probably do the math inside the SIN encoding.

    // Make the merge point
    RegionNode* r = new (C) RegionNode(3);
    Node* phi = new (C) PhiNode(r, Type::DOUBLE);

    // Flatten arg so we need only 1 test
    Node *abs = _gvn.transform(new (C) AbsDNode(arg));
    // Node for PI/4 constant
    Node *pi4 = makecon(TypeD::make(pi_4));
    // Check PI/4 : abs(arg)
    Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs));
    // Check: If PI/4 < abs(arg) then go slow
    Node *bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::lt ));
    // Branch either way
    IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
    set_control(opt_iff(r,iff));

    // Set fast path result
    phi->init_req(2, n);

    // Slow path - non-blocking leaf call
    Node* call = NULL;
    switch (id) {
    case vmIntrinsics::_dsin:
      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
                               CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
                               "Sin", NULL, arg, top());
      break;
    case vmIntrinsics::_dcos:
      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
                               CAST_FROM_FN_PTR(address, SharedRuntime::dcos),
                               "Cos", NULL, arg, top());
      break;
    case vmIntrinsics::_dtan:
      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
                               CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
                               "Tan", NULL, arg, top());
      break;
    }
    assert(control()->in(0) == call, "");
    Node* slow_result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
    r->init_req(1, control());
    phi->init_req(1, slow_result);

    // Post-merge
    set_control(_gvn.transform(r));
    record_for_igvn(r);
    n = _gvn.transform(phi);

    C->set_has_split_ifs(true); // Has chance for split-if optimization
  }
  set_result(n);
  return true;
}

Node* LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
  //-------------------
  //result=(result.isNaN())? funcAddr():result;
  // Check for NaN (via result != result, which is true exactly for NaN)
  // and then either trap or go to the runtime.
  Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result, result));
  // Build the boolean node
  Node* bolisnum = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::eq));

  if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
    { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
      // The pow or exp intrinsic returned a NaN, which requires a call
      // to the runtime.  Recompile with the runtime call.
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_make_not_entrant);
    }
    return result;
  } else {
    // If this inlining ever returned NaN in the past, we compile a call
    // to the runtime to properly handle corner cases

    IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
    Node* if_slow = _gvn.transform(new (C) IfFalseNode(iff));
    Node* if_fast = _gvn.transform(new (C) IfTrueNode(iff));

    if (!if_slow->is_top()) {
      RegionNode* result_region = new (C) RegionNode(3);
      PhiNode*    result_val = new (C) PhiNode(result_region, Type::DOUBLE);

      result_region->init_req(1, if_fast);
      result_val->init_req(1, result);

      set_control(if_slow);

      const TypePtr* no_memory_effects = NULL;
      Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
                                   no_memory_effects,
                                   x, top(), y, y ? top() : NULL);
      Node* value = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+0));
#ifdef ASSERT
      Node* value_top = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+1));
      assert(value_top == top(), "second value must be top");
#endif

      result_region->init_req(2, control());
      result_val->init_req(2, value);
      set_control(_gvn.transform(result_region));
      return _gvn.transform(result_val);
    } else {
      return result;
    }
  }
}

//------------------------------inline_exp-------------------------------------
// Inline exp instructions, if possible.  The Intel hardware only misses
// really odd corner cases (+/- Infinity).  Just uncommon-trap them.
bool LibraryCallKit::inline_exp() {
  Node* arg = round_double_node(argument(0));
  Node* n   = _gvn.transform(new (C) ExpDNode(C, control(), arg));

  n = finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
  set_result(n);

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  return true;
}

//------------------------------inline_pow-------------------------------------
// Inline power instructions, if possible.
bool LibraryCallKit::inline_pow() {
  // Pseudocode for pow
  // if (y == 2) {
  //   return x * x;
  // } else {
  //   if (x <= 0.0) {
  //     long longy = (long)y;
  //     if ((double)longy == y) { // if y is long
  //       if (y + 1 == y) longy = 0; // huge number: even
  //       result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y);
  //     } else {
  //       result = NaN;
  //     }
  //   } else {
  //     result = DPow(x,y);
  //   }
  //   if (result != result)?  {
  //     result = uncommon_trap() or runtime_call();
  //   }
  //   return result;
  // }

  Node* x = round_double_node(argument(0));
  Node* y = round_double_node(argument(2));

  Node* result = NULL;

  Node*   const_two_node = makecon(TypeD::make(2.0));
  Node*   cmp_node       = _gvn.transform(new (C) CmpDNode(y, const_two_node));
  Node*   bool_node      = _gvn.transform(new (C) BoolNode(cmp_node, BoolTest::eq));
  IfNode* if_node        = create_and_xform_if(control(), bool_node, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
  Node*   if_true        = _gvn.transform(new (C) IfTrueNode(if_node));
  Node*   if_false       = _gvn.transform(new (C) IfFalseNode(if_node));

  RegionNode* region_node = new (C) RegionNode(3);
  region_node->init_req(1, if_true);

  Node* phi_node = new (C) PhiNode(region_node, Type::DOUBLE);
  // Special case: for x^y where y == 2 we can convert it to x * x.
  phi_node->init_req(1, _gvn.transform(new (C) MulDNode(x, x)));

  // set control to if_false since we will now process the false branch
  set_control(if_false);

  if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
    // Short form: skip the fancy tests and just check for NaN result.
    result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
  } else {
    // If this inlining ever returned NaN in the past, include all
    // checks + call to the runtime.

    // Set the merge point for If node with condition of (x <= 0.0)
    // There are four possible paths to region node and phi node
    RegionNode *r = new (C) RegionNode(4);
    Node *phi = new (C) PhiNode(r, Type::DOUBLE);

    // Build the first if node: if (x <= 0.0)
    // Node for 0 constant
    Node *zeronode = makecon(TypeD::ZERO);
    // Check x:0
    Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode));
    // Check: If (x<=0) then go complex path
    Node *bol1 = _gvn.transform(new (C) BoolNode( cmp, BoolTest::le ));
    // Branch either way
    IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
    // Fast path taken; set region slot 3
    Node *fast_taken = _gvn.transform(new (C) IfFalseNode(if1));
    r->init_req(3,fast_taken); // Capture fast-control

    // Fast path not-taken, i.e. slow path
    Node *complex_path = _gvn.transform(new (C) IfTrueNode(if1));

    // Set fast path result
    Node *fast_result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
    phi->init_req(3, fast_result);

    // Complex path
    // Build the second if node (if y is long)
    // Node for (long)y
    Node *longy = _gvn.transform(new (C) ConvD2LNode(y));
    // Node for (double)((long) y)
    Node *doublelongy= _gvn.transform(new (C) ConvL2DNode(longy));
    // Check (double)((long) y) : y
    Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y));
    // Check if (y isn't long) then go to slow path

    Node *bol2 = _gvn.transform(new (C) BoolNode( cmplongy, BoolTest::ne ));
    // Branch either way
    IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
    Node* ylong_path = _gvn.transform(new (C) IfFalseNode(if2));

    Node *slow_path = _gvn.transform(new (C) IfTrueNode(if2));

    // Calculate DPow(abs(x), y)*(1 & (long)y)
    // Node for constant 1
    Node *conone = longcon(1);
    // 1& (long)y
    Node *signnode= _gvn.transform(new (C) AndLNode(conone, longy));

    // A huge number is always even. Detect a huge number by checking
    // if y + 1 == y and set integer to be tested for parity to 0.
    // Required for corner case:
    // (long)9.223372036854776E18 = max_jlong
    // (double)(long)9.223372036854776E18 = 9.223372036854776E18
    // max_jlong is odd but 9.223372036854776E18 is even
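    //
    // To spell the corner case out (illustrative): IEEE-754 doubles at or
    // above 2^53 have ulp > 1, so y + 1 == y holds exactly for such "huge" y;
    // 9.223372036854776E18 is 2^63, which is even, while ConvD2L saturates
    // it to max_jlong == 2^63 - 1, which is odd -- hence the correction.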
    Node* yplus1 = _gvn.transform(new (C) AddDNode(y, makecon(TypeD::make(1))));
    Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y));
    Node *bolyplus1 = _gvn.transform(new (C) BoolNode( cmpyplus1, BoolTest::eq ));
    Node* correctedsign = NULL;
    if (ConditionalMoveLimit != 0) {
      correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
    } else {
      IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
      RegionNode *r = new (C) RegionNode(3);
      Node *phi = new (C) PhiNode(r, TypeLong::LONG);
      r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyplus1)));
      r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyplus1)));
      phi->init_req(1, signnode);
      phi->init_req(2, longcon(0));
      correctedsign = _gvn.transform(phi);
      ylong_path = _gvn.transform(r);
      record_for_igvn(r);
    }

    // zero node
    Node *conzero = longcon(0);
    // Check (1&(long)y)==0?
    Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero));
    // Check if (1&(long)y)!=0?, if so the result is negative
    Node *bol3 = _gvn.transform(new (C) BoolNode( cmpeq1, BoolTest::ne ));
    // abs(x)
    Node *absx=_gvn.transform(new (C) AbsDNode(x));
    // abs(x)^y
    Node *absxpowy = _gvn.transform(new (C) PowDNode(C, control(), absx, y));
    // -abs(x)^y
    Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
    // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
    Node *signresult = NULL;
    if (ConditionalMoveLimit != 0) {
      signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
    } else {
      IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
      RegionNode *r = new (C) RegionNode(3);
      Node *phi = new (C) PhiNode(r, Type::DOUBLE);
      r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyeven)));
      r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyeven)));
      phi->init_req(1, absxpowy);
      phi->init_req(2, negabsxpowy);
      signresult = _gvn.transform(phi);
      ylong_path = _gvn.transform(r);
      record_for_igvn(r);
    }
    // Set complex path fast result
    r->init_req(2, ylong_path);
    phi->init_req(2, signresult);

    static const jlong nan_bits = CONST64(0x7ff8000000000000);
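    // (Illustrative note: 0x7ff8000000000000 is the canonical IEEE-754 quiet
    // NaN -- sign 0, exponent all ones, top fraction bit set -- the same bits
    // Double.longBitsToDouble would see on the Java side.)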
    Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
    r->init_req(1,slow_path);
    phi->init_req(1,slow_result);

    // Post merge
    set_control(_gvn.transform(r));
    record_for_igvn(r);
    result = _gvn.transform(phi);
  }

  result = finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");

  // control from finish_pow_exp is now input to the region node
  region_node->set_req(2, control());
  // the result from finish_pow_exp is now input to the phi node
  phi_node->init_req(2, result);
  set_control(_gvn.transform(region_node));
  record_for_igvn(region_node);
  set_result(_gvn.transform(phi_node));

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  return true;
}

//------------------------------runtime_math-----------------------------
bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
  assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
         "must be (DD)D or (D)D type");

  // Inputs
  Node* a = round_double_node(argument(0));
  Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
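  // Note (added for clarity): arguments are indexed by JVM stack slot and a
  // double occupies two slots, so the second double of a (DD)D signature
  // starts at slot 2; each double's second half is passed as top() below.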

  const TypePtr* no_memory_effects = NULL;
  Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
                                 no_memory_effects,
                                 a, top(), b, b ? top() : NULL);
  Node* value = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+0));
#ifdef ASSERT
  Node* value_top = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+1));
  assert(value_top == top(), "second value must be top");
#endif

  set_result(value);
  return true;
}

//------------------------------inline_math_native-----------------------------
bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
#define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
  switch (id) {
    // These intrinsics are not properly supported on all hardware
  case vmIntrinsics::_dcos:   return Matcher::has_match_rule(Op_CosD)   ? inline_trig(id) :
    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dcos),   "COS");
  case vmIntrinsics::_dsin:   return Matcher::has_match_rule(Op_SinD)   ? inline_trig(id) :
    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dsin),   "SIN");
  case vmIntrinsics::_dtan:   return Matcher::has_match_rule(Op_TanD)   ? inline_trig(id) :
    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dtan),   "TAN");

  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD)   ? inline_math(id) :
    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dlog),   "LOG");
  case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dlog10), "LOG10");

    // These intrinsics are supported on all hardware
  case vmIntrinsics::_dsqrt:  return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
  case vmIntrinsics::_dabs:   return Matcher::has_match_rule(Op_AbsD)   ? inline_math(id) : false;

  case vmIntrinsics::_dexp:   return Matcher::has_match_rule(Op_ExpD)   ? inline_exp()    :
    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dexp),  "EXP");
  case vmIntrinsics::_dpow:   return Matcher::has_match_rule(Op_PowD)   ? inline_pow()    :
    runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow),  "POW");
#undef FN_PTR

    // These intrinsics are not yet correctly implemented
  case vmIntrinsics::_datan2:
    return false;

  default:
    fatal_unexpected_iid(id);
    return false;
  }
}

static bool is_simple_name(Node* n) {
  return (n->req() == 1         // constant
          || (n->is_Type() && n->as_Type()->type()->singleton())
          || n->is_Proj()       // parameter or return value
          || n->is_Phi()        // local of some sort
          );
}

//----------------------------inline_min_max-----------------------------------
bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
  set_result(generate_min_max(id, argument(0), argument(1)));
  return true;
}

void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
  Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
  IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
  Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
  Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );

  {
    PreserveJVMState pjvms(this);
    PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    set_control(slow_path);
    set_i_o(i_o());

    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_none);
  }

  set_control(fast_path);
  set_result(math);
}

template <typename OverflowOp>
bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
  typedef typename OverflowOp::MathOp MathOp;

  MathOp* mathOp = new(C) MathOp(arg1, arg2);
  Node* operation = _gvn.transform( mathOp );
  Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
  inline_math_mathExact(operation, ofcheck);
  return true;
}
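
// For illustration (not part of the original source), Math.addExact(int, int)
// reaches inline_math_addExactI below and the template expands roughly to:
//   Node* sum = _gvn.transform(new(C) AddINode(arg0, arg1));         // MathOp
//   Node* chk = _gvn.transform(new(C) OverflowAddINode(arg0, arg1)); // test
//   inline_math_mathExact(sum, chk);  // fast path: sum; overflow: deopt
// assuming OverflowAddINode::MathOp is AddINode, per mathexactnode.hpp.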

bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
  return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
}

bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
  return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
}

bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
  return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
}

bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
  return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
}

bool LibraryCallKit::inline_math_negateExactI() {
  return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
}

bool LibraryCallKit::inline_math_negateExactL() {
  return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
}

bool LibraryCallKit::inline_math_multiplyExactI() {
  return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
}

bool LibraryCallKit::inline_math_multiplyExactL() {
  return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
}

Node*
LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
  // These are the candidate return values:
  Node* xvalue = x0;
  Node* yvalue = y0;

  if (xvalue == yvalue) {
    return xvalue;
  }

  bool want_max = (id == vmIntrinsics::_max);

  const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
  const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
  if (txvalue == NULL || tyvalue == NULL)  return top();
  // This is not really necessary, but it is consistent with a
  // hypothetical MaxINode::Value method:
  int widen = MAX2(txvalue->_widen, tyvalue->_widen);

  // %%% This folding logic should (ideally) be in a different place.
  // Some should be inside IfNode, and there should be a more reliable
  // transformation of ?: style patterns into cmoves.  We also want
  // more powerful optimizations around cmove and min/max.

  // Try to find a dominating comparison of these guys.
  // It can simplify the index computation for Arrays.copyOf
  // and similar uses of System.arraycopy.
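  // For example (illustrative): in code shaped like
  //   if (off <= a.length) { int n = Math.min(off, a.length); ... }
  // the dominating test proves off <= a.length, so the min below folds
  // straight to 'off' with no CMove at all.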
  // First, compute the normalized version of CmpI(x, y).
  int   cmp_op = Op_CmpI;
  Node* xkey = xvalue;
  Node* ykey = yvalue;
  Node* ideal_cmpxy = _gvn.transform(new(C) CmpINode(xkey, ykey));
  if (ideal_cmpxy->is_Cmp()) {
    // E.g., if we have CmpI(length - offset, count),
    // it might idealize to CmpI(length, count + offset)
    cmp_op = ideal_cmpxy->Opcode();
    xkey = ideal_cmpxy->in(1);
    ykey = ideal_cmpxy->in(2);
  }

  // Start by locating any relevant comparisons.
  Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
  Node* cmpxy = NULL;
  Node* cmpyx = NULL;
  for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
    Node* cmp = start_from->fast_out(k);
    if (cmp->outcnt() > 0 &&            // must have prior uses
        cmp->in(0) == NULL &&           // must be context-independent
        cmp->Opcode() == cmp_op) {      // right kind of compare
      if (cmp->in(1) == xkey && cmp->in(2) == ykey)  cmpxy = cmp;
      if (cmp->in(1) == ykey && cmp->in(2) == xkey)  cmpyx = cmp;
    }
  }

  const int NCMPS = 2;
  Node* cmps[NCMPS] = { cmpxy, cmpyx };
  int cmpn;
  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
    if (cmps[cmpn] != NULL)  break;     // find a result
  }
  if (cmpn < NCMPS) {
    // Look for a dominating test that tells us the min and max.
    int depth = 0;                // Limit search depth for speed
    Node* dom = control();
    for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
      if (++depth >= 100)  break;
      Node* ifproj = dom;
      if (!ifproj->is_Proj())  continue;
      Node* iff = ifproj->in(0);
      if (!iff->is_If())  continue;
      Node* bol = iff->in(1);
      if (!bol->is_Bool())  continue;
      Node* cmp = bol->in(1);
      if (cmp == NULL)  continue;
      for (cmpn = 0; cmpn < NCMPS; cmpn++)
        if (cmps[cmpn] == cmp)  break;
      if (cmpn == NCMPS)  continue;
      BoolTest::mask btest = bol->as_Bool()->_test._test;
      if (ifproj->is_IfFalse())  btest = BoolTest(btest).negate();
      if (cmp->in(1) == ykey)    btest = BoolTest(btest).commute();
      // At this point, we know that 'x btest y' is true.
      switch (btest) {
      case BoolTest::eq:
        // They are proven equal, so we can collapse the min/max.
        // Either value is the answer.  Choose the simpler.
        if (is_simple_name(yvalue) && !is_simple_name(xvalue))
          return yvalue;
        return xvalue;
      case BoolTest::lt:          // x < y
      case BoolTest::le:          // x <= y
        return (want_max ? yvalue : xvalue);
      case BoolTest::gt:          // x > y
      case BoolTest::ge:          // x >= y
        return (want_max ? xvalue : yvalue);
      }
    }
  }

  // We failed to find a dominating test.
  // Let's pick a test that might GVN with prior tests.
  Node*          best_bol   = NULL;
  BoolTest::mask best_btest = BoolTest::illegal;
  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
    Node* cmp = cmps[cmpn];
    if (cmp == NULL)  continue;
    for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
      Node* bol = cmp->fast_out(j);
      if (!bol->is_Bool())  continue;
      BoolTest::mask btest = bol->as_Bool()->_test._test;
      if (btest == BoolTest::eq || btest == BoolTest::ne)  continue;
      if (cmp->in(1) == ykey)   btest = BoolTest(btest).commute();
      if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
        best_bol   = bol->as_Bool();
        best_btest = btest;
      }
    }
  }

  Node* answer_if_true  = NULL;
  Node* answer_if_false = NULL;
  switch (best_btest) {
  default:
    if (cmpxy == NULL)
      cmpxy = ideal_cmpxy;
    best_bol = _gvn.transform(new(C) BoolNode(cmpxy, BoolTest::lt));
    // and fall through:
  case BoolTest::lt:          // x < y
  case BoolTest::le:          // x <= y
    answer_if_true  = (want_max ? yvalue : xvalue);
    answer_if_false = (want_max ? xvalue : yvalue);
    break;
  case BoolTest::gt:          // x > y
  case BoolTest::ge:          // x >= y
    answer_if_true  = (want_max ? xvalue : yvalue);
    answer_if_false = (want_max ? yvalue : xvalue);
    break;
  }

  jint hi, lo;
  if (want_max) {
    // We can sharpen the minimum.
    hi = MAX2(txvalue->_hi, tyvalue->_hi);
    lo = MAX2(txvalue->_lo, tyvalue->_lo);
  } else {
    // We can sharpen the maximum.
    hi = MIN2(txvalue->_hi, tyvalue->_hi);
    lo = MIN2(txvalue->_lo, tyvalue->_lo);
  }

  // Use a flow-free graph structure, to avoid creating excess control edges
  // which could hinder other optimizations.
  // Since Math.min/max is often used with arraycopy, we want
  // tightly_coupled_allocation to be able to see beyond min/max expressions.
  Node* cmov = CMoveNode::make(C, NULL, best_bol,
                               answer_if_false, answer_if_true,
                               TypeInt::make(lo, hi, widen));

  return _gvn.transform(cmov);

  /*
  // This is not as desirable as it may seem, since Min and Max
  // nodes do not have a full set of optimizations.
  // And they would interfere, anyway, with 'if' optimizations
  // and with CMoveI canonical forms.
  switch (id) {
  case vmIntrinsics::_min:
    result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
  case vmIntrinsics::_max:
    result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
  default:
    ShouldNotReachHere();
  }
  */
}
2341
2342
inline int
2343
LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
2344
const TypePtr* base_type = TypePtr::NULL_PTR;
2345
if (base != NULL) base_type = _gvn.type(base)->isa_ptr();
2346
if (base_type == NULL) {
2347
// Unknown type.
2348
return Type::AnyPtr;
2349
} else if (base_type == TypePtr::NULL_PTR) {
2350
// Since this is a NULL+long form, we have to switch to a rawptr.
2351
base = _gvn.transform(new (C) CastX2PNode(offset));
2352
offset = MakeConX(0);
2353
return Type::RawPtr;
2354
} else if (base_type->base() == Type::RawPtr) {
2355
return Type::RawPtr;
2356
} else if (base_type->isa_oopptr()) {
2357
// Base is never null => always a heap address.
2358
if (base_type->ptr() == TypePtr::NotNull) {
2359
return Type::OopPtr;
2360
}
2361
// Offset is small => always a heap address.
2362
const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2363
if (offset_type != NULL &&
2364
base_type->offset() == 0 && // (should always be?)
2365
offset_type->_lo >= 0 &&
2366
!MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2367
return Type::OopPtr;
2368
}
2369
// Otherwise, it might either be oop+off or NULL+addr.
2370
return Type::AnyPtr;
2371
} else {
2372
// No information:
2373
return Type::AnyPtr;
2374
}
2375
}
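
// Illustrative mapping (not in the original source), typical cases only:
//   Unsafe.getInt(obj, fieldOffset) -> not-null oop base, small offset -> Type::OopPtr
//   Unsafe.getByte(rawAddress)      -> NULL base + long address        -> Type::RawPtr
//   base of statically unknown kind ->                                    Type::AnyPtr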

inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
  int kind = classify_unsafe_addr(base, offset);
  if (kind == Type::RawPtr) {
    return basic_plus_adr(top(), base, offset);
  } else {
    return basic_plus_adr(base, offset);
  }
}

//--------------------------inline_number_methods-----------------------------
// inline int     Integer.numberOfLeadingZeros(int)
// inline int        Long.numberOfLeadingZeros(long)
//
// inline int     Integer.numberOfTrailingZeros(int)
// inline int        Long.numberOfTrailingZeros(long)
//
// inline int     Integer.bitCount(int)
// inline int        Long.bitCount(long)
//
// inline char  Character.reverseBytes(char)
// inline short     Short.reverseBytes(short)
// inline int     Integer.reverseBytes(int)
// inline long       Long.reverseBytes(long)
bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
  Node* arg = argument(0);
  Node* n = NULL;
  switch (id) {
  case vmIntrinsics::_numberOfLeadingZeros_i:   n = new (C) CountLeadingZerosINode( arg);  break;
  case vmIntrinsics::_numberOfLeadingZeros_l:   n = new (C) CountLeadingZerosLNode( arg);  break;
  case vmIntrinsics::_numberOfTrailingZeros_i:  n = new (C) CountTrailingZerosINode(arg);  break;
  case vmIntrinsics::_numberOfTrailingZeros_l:  n = new (C) CountTrailingZerosLNode(arg);  break;
  case vmIntrinsics::_bitCount_i:               n = new (C) PopCountINode(          arg);  break;
  case vmIntrinsics::_bitCount_l:               n = new (C) PopCountLNode(          arg);  break;
  case vmIntrinsics::_reverseBytes_c:           n = new (C) ReverseBytesUSNode(0,   arg);  break;
  case vmIntrinsics::_reverseBytes_s:           n = new (C) ReverseBytesSNode( 0,   arg);  break;
  case vmIntrinsics::_reverseBytes_i:           n = new (C) ReverseBytesINode( 0,   arg);  break;
  case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
  default:  fatal_unexpected_iid(id);  break;
  }
  set_result(_gvn.transform(n));
  return true;
}

//----------------------------inline_unsafe_access----------------------------

const static BasicType T_ADDRESS_HOLDER = T_LONG;

// Helper that guards and inserts a pre-barrier.
void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across a safepoint.
  if (!UseG1GC && !need_mem_bar)
    return;

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if ( klass->is_loaded() &&
          !klass->is_subtype_of(env()->Reference_klass()) &&
          !env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(this);
#define __ ideal.

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update graphKit memory and control from IdealKit.
      sync_kit(ideal);

      Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
      Node* is_instof = gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from graphKit.
      __ sync_kit(this);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update graphKit from IdealKit.
        sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        pre_barrier(false /* do_load */,
                    __ ctrl(),
                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                    pre_val /* pre_val */,
                    T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from graphKit.
        __ sync_kit(this);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
#undef __
}


// Interpret Unsafe.fieldOffset cookies correctly:
extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);

const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
  // Attempt to infer a sharper value type from the offset and base type.
  ciKlass* sharpened_klass = NULL;

  // See if it is an instance field, with an object type.
  if (alias_type->field() != NULL) {
    assert(!is_native_ptr, "native pointer op cannot use a java address");
    if (alias_type->field()->type()->is_klass()) {
      sharpened_klass = alias_type->field()->type()->as_klass();
    }
  }

  // See if it is a narrow oop array.
  if (adr_type->isa_aryptr()) {
    if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
      const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
      if (elem_type != NULL) {
        sharpened_klass = elem_type->klass();
      }
    }
  }
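
  // E.g. (illustrative): a getObject from a String[] element sees an array
  // address type whose element klass is String, so the value type below is
  // sharpened from Object to String.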

  // The sharpened class might be unloaded if there is no class loader
  // constraint in place.
  if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
    const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);

#ifndef PRODUCT
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("  from base type: ");  adr_type->dump(); tty->cr();
      tty->print("  sharpened value: ");  tjp->dump();     tty->cr();
    }
#endif
    // Sharpen the value type.
    return tjp;
  }
  return NULL;
}

bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
  if (callee()->is_static())  return false;  // caller must have the capability!
  assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");

#ifndef PRODUCT
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
#ifdef ASSERT
    if (!is_store) {
      // Object getObject(Object base, int/long offset), etc.
      BasicType rtype = sig->return_type()->basic_type();
      if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
          rtype = T_ADDRESS;  // it is really a C void*
      assert(rtype == type, "getter must return the expected value");
      if (!is_native_ptr) {
        assert(sig->count() == 2, "oop getter has 2 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
      } else {
        assert(sig->count() == 1, "native getter has 1 argument");
        assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
      }
    } else {
      // void putObject(Object base, int/long offset, Object x), etc.
      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
      if (!is_native_ptr) {
        assert(sig->count() == 3, "oop putter has 3 arguments");
        assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
      } else {
        assert(sig->count() == 2, "native putter has 2 arguments");
        assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
      }
      BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
      if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
        vtype = T_ADDRESS;  // it is really a C void*
      assert(vtype == type, "putter must accept the expected value");
    }
#endif // ASSERT
  }
#endif //PRODUCT

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  Node* receiver = argument(0);  // type: oop

  // Build address expression.  See the code in inline_unsafe_prefetch.
  Node* adr;
  Node* heap_base_oop = top();
  Node* offset = top();
  Node* val;

  // The base is either a Java object or a value produced by Unsafe.staticFieldBase
  Node* base = argument(1);  // type: oop

  if (!is_native_ptr) {
    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
    offset = argument(2);  // type: long
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_base.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");
    // 32-bit machines ignore the high half!
    offset = ConvL2X(offset);
    adr = make_unsafe_address(base, offset);
    heap_base_oop = base;
    val = is_store ? argument(4) : NULL;
  } else {
    Node* ptr = argument(1);  // type: long
    ptr = ConvL2X(ptr);  // adjust Java long to machine word
    adr = make_unsafe_address(NULL, ptr);
    val = is_store ? argument(3) : NULL;
  }

  if ((_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) && type == T_OBJECT) {
    return false; // off-heap oop accesses are not supported
  }

  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();

  // Try to categorize the address.
  Compile::AliasType* alias_type = C->alias_type(adr_type);
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");

  if (alias_type->adr_type() == TypeInstPtr::KLASS ||
      alias_type->adr_type() == TypeAryPtr::RANGE) {
    return false; // not supported
  }

  bool mismatched = false;
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL) {
    assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
    if (bt == T_BYTE && adr_type->isa_aryptr()) {
      // Alias type doesn't differentiate between byte[] and boolean[].
      // Use address type to get the element type.
      bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
    }
    if (bt == T_ARRAY || bt == T_NARROWOOP) {
      // accessing an array field with getObject is not a mismatch
      bt = T_OBJECT;
    }
    if ((bt == T_OBJECT) != (type == T_OBJECT)) {
      // Don't intrinsify mismatched object accesses
      return false;
    }
    mismatched = (bt != type);
  }
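
  // Illustrative cases: Unsafe.getLong on an int field yields bt == T_INT vs.
  // type == T_LONG -> mismatched == true (still intrinsified, pessimistically);
  // Unsafe.getObject on a long field fails the object check above and is not
  // intrinsified at all.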

  assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");

  // First guess at the value type.
  const Type *value_type = Type::get_const_basic_type(type);

  // We will need memory barriers unless we can determine a unique
  // alias category for this reference.  (Note:  If for some reason
  // the barriers get omitted and the unsafe reference begins to "pollute"
  // the alias analysis of the rest of the graph, either Compile::can_alias
  // or Compile::must_alias will throw a diagnostic assert.)
  bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add a memory barrier to prevent commoning reads
  // from this field across a safepoint, since GC can change its value.
  bool need_read_barrier = !is_native_ptr && !is_store &&
                           offset != top() && heap_base_oop != top();

  if (!is_store && type == T_OBJECT) {
    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
    if (tjp != NULL) {
      value_type = tjp;
    }
  }

  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }
  // Heap pointers get a null-check from the interpreter,
  // as a courtesy.  However, this is not guaranteed by Unsafe,
  // and it is not possible to fully distinguish unintended nulls
  // from intended ones in this API.

  Node* load = NULL;
  Node* store = NULL;
  Node* leading_membar = NULL;
  if (is_volatile) {
    // We need to emit leading and trailing CPU membars (see below) in
    // addition to memory membars when is_volatile. This is a little
    // too strong, but avoids the need to insert per-alias-type
    // volatile membars (for stores; compare Parse::do_put_xxx), which
    // we cannot do effectively here because we probably only have a
    // rough approximation of type.
    need_mem_bar = true;
    // For Stores, place a memory ordering barrier now.
    if (is_store) {
      leading_membar = insert_mem_bar(Op_MemBarRelease);
    } else {
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
        leading_membar = insert_mem_bar(Op_MemBarVolatile);
      }
    }
  }
2735
2736
// Memory barrier to prevent normal and 'unsafe' accesses from
2737
// bypassing each other. Happens after null checks, so the
2738
// exception paths do not take memory state from the memory barrier,
2739
// so there's no problems making a strong assert about mixing users
2740
// of safe & unsafe memory. Otherwise fails in a CTW of rt.jar
2741
// around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2742
if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2743
2744
if (!is_store) {
2745
MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2746
// To be valid, unsafe loads may depend on other conditions than
2747
// the one that guards them: pin the Load node
2748
load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2749
// load value
2750
switch (type) {
2751
case T_BOOLEAN:
2752
case T_CHAR:
2753
case T_BYTE:
2754
case T_SHORT:
2755
case T_INT:
2756
case T_LONG:
2757
case T_FLOAT:
2758
case T_DOUBLE:
2759
break;
2760
case T_OBJECT:
2761
if (need_read_barrier) {
2762
insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2763
}
2764
break;
2765
case T_ADDRESS:
2766
// Cast to an int type.
2767
load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2768
load = ConvX2UL(load);
2769
break;
2770
default:
2771
fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2772
break;
2773
}
2774
// The load node has the control of the preceding MemBarCPUOrder. All
2775
// following nodes will have the control of the MemBarCPUOrder inserted at
2776
// the end of this method. So, pushing the load onto the stack at a later
2777
// point is fine.
2778
set_result(load);
2779
} else {
2780
// place effect of store into memory
2781
switch (type) {
2782
case T_DOUBLE:
2783
val = dstore_rounding(val);
2784
break;
2785
case T_ADDRESS:
2786
// Repackage the long as a pointer.
2787
val = ConvL2X(val);
2788
val = _gvn.transform(new (C) CastX2PNode(val));
2789
break;
2790
}
2791
2792
MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2793
if (type == T_OBJECT ) {
2794
store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2795
} else {
2796
store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2797
}
2798
}
2799
2800
if (is_volatile) {
2801
if (!is_store) {
2802
Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2803
mb->as_MemBar()->set_trailing_load();
2804
} else {
2805
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2806
Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2807
MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2808
}
2809
}
2810
}
2811
2812
if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2813
2814
return true;
2815
}
2816
2817
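// Editorial note (summarizing the code above, not new behavior): for a
// volatile Unsafe access this intrinsic emits, in graph order,
//   load:  [MemBarVolatile when IRIW support is required]; MemBarCPUOrder;
//          LoadX(acquire); MemBarAcquire; MemBarCPUOrder
//   store: MemBarRelease; MemBarCPUOrder; StoreX(release);
//          [MemBarVolatile unless IRIW support is required]; MemBarCPUOrder
// The leading and trailing membars are paired via set_store_pair /
// set_trailing_load so the backend can recognize the pattern and elide
// barriers the target's memory model already provides.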
//----------------------------inline_unsafe_prefetch----------------------------

bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
#ifndef PRODUCT
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
#ifdef ASSERT
    // Object getObject(Object base, int/long offset), etc.
    BasicType rtype = sig->return_type()->basic_type();
    if (!is_native_ptr) {
      assert(sig->count() == 2, "oop prefetch has 2 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "prefetch offset is long");
    } else {
      assert(sig->count() == 1, "native prefetch has 1 argument");
      assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long");
    }
#endif // ASSERT
  }
#endif // !PRODUCT

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  const int idx = is_static ? 0 : 1;
  if (!is_static) {
    null_check_receiver();
    if (stopped()) {
      return true;
    }
  }

  // Build address expression.  See the code in inline_unsafe_access.
  Node *adr;
  if (!is_native_ptr) {
    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
    Node* base   = argument(idx + 0);  // type: oop
    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
    Node* offset = argument(idx + 1);  // type: long
    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
    // to be plain byte offsets, which are also the same as those accepted
    // by oopDesc::field_base.
    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
           "fieldOffset must be byte-scaled");
    // 32-bit machines ignore the high half!
    offset = ConvL2X(offset);
    adr = make_unsafe_address(base, offset);
  } else {
    Node* ptr = argument(idx + 0);  // type: long
    ptr = ConvL2X(ptr);             // adjust Java long to machine word
    adr = make_unsafe_address(NULL, ptr);
  }

  // Generate the read or write prefetch
  Node *prefetch;
  if (is_store) {
    prefetch = new (C) PrefetchWriteNode(i_o(), adr);
  } else {
    prefetch = new (C) PrefetchReadNode(i_o(), adr);
  }
  prefetch->init_req(0, control());
  set_i_o(_gvn.transform(prefetch));

  return true;
}
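// Editorial note (a reading of the wiring above): the prefetch node is
// threaded onto the i_o chain (it consumes i_o() and becomes the new i_o)
// rather than onto memory state; a prefetch has no observable memory effect,
// and anchoring it this way keeps it ordered relative to the surrounding
// code and prevents it from being eliminated as dead.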
//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmpxchg:
//   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
//   public final native boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
//   public final native boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
//
// LS_xadd:
//   public int  getAndAddInt( Object o, long offset, int  delta)
//   public long getAndAddLong(Object o, long offset, long delta)
//
// LS_xchg:
//   int    getAndSet(Object o, long offset, int    newValue)
//   long   getAndSet(Object o, long offset, long   newValue)
//   Object getAndSet(Object o, long offset, Object newValue)
//
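// Editorial sketch (hypothetical Java caller, not part of this file; U,
// Counter, counter, expected and update are illustrative names): a typical
// LS_cmpxchg user looks like
//   long off = U.objectFieldOffset(Counter.class.getDeclaredField("value"));
//   boolean ok = U.compareAndSwapInt(counter, off, expected, update);
// C2 replaces such a call with the CompareAndSwapI subgraph built below,
// bracketed by the same leading/trailing membars as a volatile access.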
bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
  // This basic scheme here is the same as inline_unsafe_access, but
  // differs in enough details that combining them would make the code
  // overly confusing.  (This is a true fact! I originally combined
  // them, but even I was confused by it!) As much code/comments as
  // possible are retained from inline_unsafe_access though to make
  // the correspondences clearer. - dl

  if (callee()->is_static())  return false;  // caller must have the capability!

#ifndef PRODUCT
  BasicType rtype;
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
    rtype = sig->return_type()->basic_type();
    if (kind == LS_xadd || kind == LS_xchg) {
      // Check the signatures.
#ifdef ASSERT
      assert(rtype == type, "get and set must return the expected type");
      assert(sig->count() == 3, "get and set has 3 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
      assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
#endif // ASSERT
    } else if (kind == LS_cmpxchg) {
      // Check the signatures.
#ifdef ASSERT
      assert(rtype == T_BOOLEAN, "CAS must return boolean");
      assert(sig->count() == 4, "CAS has 4 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
    } else {
      ShouldNotReachHere();
    }
  }
#endif //PRODUCT

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  // Get arguments:
  Node* receiver = NULL;
  Node* base     = NULL;
  Node* offset   = NULL;
  Node* oldval   = NULL;
  Node* newval   = NULL;
  if (kind == LS_cmpxchg) {
    const bool two_slot_type = type2size[type] == 2;
    receiver = argument(0);  // type: oop
    base     = argument(1);  // type: oop
    offset   = argument(2);  // type: long
    oldval   = argument(4);  // type: oop, int, or long
    newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
  } else if (kind == LS_xadd || kind == LS_xchg){
    receiver = argument(0);  // type: oop
    base     = argument(1);  // type: oop
    offset   = argument(2);  // type: long
    oldval   = NULL;
    newval   = argument(4);  // type: oop, int, or long
  }

  // Build field offset expression.
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_base.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half of long offsets
  offset = ConvL2X(offset);
  Node* adr = make_unsafe_address(base, offset);
  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();

  Compile::AliasType* alias_type = C->alias_type(adr_type);
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL &&
      ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
    // Don't intrinsify mismatched object accesses.
    return false;
  }

  // For CAS, unlike inline_unsafe_access, there seems no point in
  // trying to refine types. Just use the coarse types here.
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  const Type *value_type = Type::get_const_basic_type(type);

  if (kind == LS_xchg && type == T_OBJECT) {
    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
    if (tjp != NULL) {
      value_type = tjp;
    }
  }

  // Null check receiver.
  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }

  int alias_idx = C->get_alias_index(adr_type);

  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so needs barriers on each side.  These don't translate
  // into actual barriers on most machines, but we still need rest of
  // compiler to respect ordering.

  Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
  insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  //          memory node in order to avoid a false
  //          dependency which will confuse the scheduler.
  Node *mem = memory(alias_idx);

  // For now, we handle only those cases that actually exist: ints,
  // longs, and Object. Adding others should be straightforward.
  Node* load_store = NULL;
  switch(type) {
  case T_INT:
    if (kind == LS_xadd) {
      load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
    } else if (kind == LS_xchg) {
      load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
    } else if (kind == LS_cmpxchg) {
      load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
    } else {
      ShouldNotReachHere();
    }
    break;
  case T_LONG:
    if (kind == LS_xadd) {
      load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
    } else if (kind == LS_xchg) {
      load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
    } else if (kind == LS_cmpxchg) {
      load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
    } else {
      ShouldNotReachHere();
    }
    break;
  case T_OBJECT:
    // Transformation of a value which could be NULL pointer (CastPP #NULL)
    // could be delayed during Parse (for example, in adjust_map_after_if()).
    // Execute transformation here to avoid barrier generation in such case.
    if (_gvn.type(newval) == TypePtr::NULL_PTR)
      newval = _gvn.makecon(TypePtr::NULL_PTR);

    // Reference stores need a store barrier.
    if (kind == LS_xchg) {
      // If pre-barrier must execute before the oop store, old value will require do_load here.
      if (!can_move_pre_barrier()) {
        pre_barrier(true /* do_load*/,
                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
                    NULL /* pre_val*/,
                    T_OBJECT);
      } // Else move pre_barrier to use load_store value, see below.
    } else if (kind == LS_cmpxchg) {
      // Same as for newval above:
      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
        oldval = _gvn.makecon(TypePtr::NULL_PTR);
      }
      // The only known value which might get overwritten is oldval.
      pre_barrier(false /* do_load */,
                  control(), NULL, NULL, max_juint, NULL, NULL,
                  oldval /* pre_val */,
                  T_OBJECT);
    } else {
      ShouldNotReachHere();
    }

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
      if (kind == LS_xchg) {
        load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
                                                           newval_enc, adr_type, value_type->make_narrowoop()));
      } else {
        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
        Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
        load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
                                                                newval_enc, oldval_enc));
      }
    } else
#endif
    {
      if (kind == LS_xchg) {
        load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
      } else {
        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
        load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
      }
    }
    post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
    break;
  default:
    fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
    break;
  }

  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
  set_memory(proj, alias_idx);

  Node* access = load_store;

  if (type == T_OBJECT && kind == LS_xchg) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    if (can_move_pre_barrier()) {
      // Don't need to load pre_val. The old value is returned by load_store.
      // The pre_barrier can execute after the xchg as long as no safepoint
      // gets inserted between them.
      pre_barrier(false /* do_load */,
                  control(), NULL, NULL, max_juint, NULL, NULL,
                  load_store /* pre_val */,
                  T_OBJECT);
    }
  }

  // Add the trailing membar surrounding the access
  insert_mem_bar(Op_MemBarCPUOrder);
  Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
  MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());

  assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
  set_result(load_store);
  return true;
}
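// Editorial note: on _LP64 with compressed oops (the is_ptr_to_narrowoop
// branches above), the CAS/xchg operates on 32-bit narrow encodings --
// EncodeP on the inputs before the LoadStore node, DecodeN on the returned
// value -- so the atomic node itself never handles a raw 64-bit oop.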
//----------------------------inline_unsafe_ordered_store----------------------
// public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
// public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
// public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
  // This is another variant of inline_unsafe_access, differing in
  // that it always issues a store-store ("release") barrier and ensures
  // store-atomicity (which only matters for "long").

  if (callee()->is_static())  return false;  // caller must have the capability!

#ifndef PRODUCT
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
#ifdef ASSERT
    BasicType rtype = sig->return_type()->basic_type();
    assert(rtype == T_VOID, "must return void");
    assert(sig->count() == 3, "has 3 arguments");
    assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
    assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
#endif // ASSERT
  }
#endif //PRODUCT

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  // Get arguments:
  Node* receiver = argument(0);  // type: oop
  Node* base     = argument(1);  // type: oop
  Node* offset   = argument(2);  // type: long
  Node* val      = argument(4);  // type: oop, int, or long

  // Null check receiver.
  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }

  // Build field offset expression.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half of long offsets
  offset = ConvL2X(offset);
  Node* adr = make_unsafe_address(base, offset);
  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
  const Type *value_type = Type::get_const_basic_type(type);
  Compile::AliasType* alias_type = C->alias_type(adr_type);

  insert_mem_bar(Op_MemBarRelease);
  insert_mem_bar(Op_MemBarCPUOrder);
  // Ensure that the store is atomic for longs:
  const bool require_atomic_access = true;
  Node* store;
  if (type == T_OBJECT) { // reference stores need a store barrier.
    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
  } else {
    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
  }
  insert_mem_bar(Op_MemBarCPUOrder);
  return true;
}
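// Editorial note: putOrderedX therefore emits
//   MemBarRelease; MemBarCPUOrder; StoreX(release, atomic for long); MemBarCPUOrder
// i.e. a release fence ahead of the store but no trailing MemBarVolatile.
// This is the cheaper-than-volatile ordering that backs the
// java.util.concurrent.atomic lazySet() methods.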
bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
  // Regardless of form, don't allow previous ld/st to move down,
  // then issue acquire, release, or volatile mem_bar.
  insert_mem_bar(Op_MemBarCPUOrder);
  switch(id) {
  case vmIntrinsics::_loadFence:
    insert_mem_bar(Op_LoadFence);
    return true;
  case vmIntrinsics::_storeFence:
    insert_mem_bar(Op_StoreFence);
    return true;
  case vmIntrinsics::_fullFence:
    insert_mem_bar(Op_MemBarVolatile);
    return true;
  default:
    fatal_unexpected_iid(id);
    return false;
  }
}
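// Summary (editorial): the Java-level fences map onto IR barriers as
//   Unsafe.loadFence()  -> MemBarCPUOrder; LoadFence
//   Unsafe.storeFence() -> MemBarCPUOrder; StoreFence
//   Unsafe.fullFence()  -> MemBarCPUOrder; MemBarVolatile
// with the common MemBarCPUOrder keeping earlier loads and stores from
// sinking below the fence during scheduling.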
bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
  if (!kls->is_Con()) {
    return true;
  }
  const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
  if (klsptr == NULL) {
    return true;
  }
  ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
  // don't need a guard for a klass that is already initialized
  return !ik->is_initialized();
}

//----------------------------inline_unsafe_allocate---------------------------
// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
bool LibraryCallKit::inline_unsafe_allocate() {
  if (callee()->is_static())  return false;  // caller must have the capability!

  null_check_receiver();  // null-check, then ignore
  Node* cls = null_check(argument(1));
  if (stopped())  return true;

  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
  kls = null_check(kls);
  if (stopped())  return true;  // argument was like int.class

  Node* test = NULL;
  if (LibraryCallKit::klass_needs_init_guard(kls)) {
    // Note:  The argument might still be an illegal value like
    // Serializable.class or Object[].class.  The runtime will handle it.
    // But we must make an explicit check for initialization.
    Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
    // can generate code to load it as unsigned byte.
    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
    Node* bits = intcon(InstanceKlass::fully_initialized);
    test = _gvn.transform(new (C) SubINode(inst, bits));
    // The 'test' is non-zero if we need to take a slow path.
  }

  Node* obj = new_instance(kls, test);
  set_result(obj);
  return true;
}
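// Editorial sketch of the shape generated above (pseudocode, not emitted
// verbatim): for a non-constant or not-yet-initialized klass,
//   if (kls->_init_state != fully_initialized) goto slow_path;  // runtime runs <clinit>
//   obj = new_instance(kls);                                    // fast allocation
// For a constant, already-initialized klass the guard folds away entirely
// (klass_needs_init_guard returns false) and only the allocation remains.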
#ifdef JFR_HAVE_INTRINSICS
/*
 * oop -> myklass
 * myklass->trace_id |= USED
 * return myklass->trace_id & ~0x3
 */
bool LibraryCallKit::inline_native_classID() {
  Node* cls = null_check(argument(0), T_OBJECT);
  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
  kls = null_check(kls, T_OBJECT);

  ByteSize offset = KLASS_TRACE_ID_OFFSET;
  Node* insp = basic_plus_adr(kls, in_bytes(offset));
  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);

  Node* clsused = longcon(0x01l); // set the class bit
  Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
  const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
  store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);

#ifdef TRACE_ID_META_BITS
  Node* mbits = longcon(~TRACE_ID_META_BITS);
  tvalue = _gvn.transform(new (C) AndLNode(tvalue, mbits));
#endif
#ifdef TRACE_ID_SHIFT
  Node* cbits = intcon(TRACE_ID_SHIFT);
  tvalue = _gvn.transform(new (C) URShiftLNode(tvalue, cbits));
#endif

  set_result(tvalue);
  return true;
}
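// Editorial sketch of the computation above (pseudocode):
//   id = kls->_trace_id;
//   kls->_trace_id = id | 0x01;                              // mark the class USED
//   result = (id & ~TRACE_ID_META_BITS) >>> TRACE_ID_SHIFT;  // when those macros are defined
// Note that the result is derived from 'tvalue' as loaded before the USED
// bit was ORed in.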
bool LibraryCallKit::inline_native_getEventWriter() {
  Node* tls_ptr = _gvn.transform(new (C) ThreadLocalNode());

  Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
                                  in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));

  Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);

  Node* jobj_cmp_null = _gvn.transform(new (C) CmpPNode(jobj, null()));
  Node* test_jobj_eq_null = _gvn.transform(new (C) BoolNode(jobj_cmp_null, BoolTest::eq));

  IfNode* iff_jobj_null =
    create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);

  enum { _normal_path = 1,
         _null_path = 2,
         PATH_LIMIT };

  RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new (C) PhiNode(result_rgn, TypePtr::BOTTOM);

  Node* jobj_is_null = _gvn.transform(new (C) IfTrueNode(iff_jobj_null));
  result_rgn->init_req(_null_path, jobj_is_null);
  result_val->init_req(_null_path, null());

  Node* jobj_is_not_null = _gvn.transform(new (C) IfFalseNode(iff_jobj_null));
  result_rgn->init_req(_normal_path, jobj_is_not_null);

  Node* res = make_load(jobj_is_not_null, jobj, TypeInstPtr::NOTNULL, T_OBJECT, MemNode::unordered);
  result_val->init_req(_normal_path, res);

  set_result(result_rgn, result_val);

  return true;
}
#endif // JFR_HAVE_INTRINSICS
//------------------------inline_native_time_funcs--------------
// inline code for System.currentTimeMillis() and System.nanoTime()
// these have the same type and signature
bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
  const TypeFunc* tf = OptoRuntime::void_long_Type();
  const TypePtr* no_memory_effects = NULL;
  Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
  Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0));
#ifdef ASSERT
  Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+1));
  assert(value_top == top(), "second value must be top");
#endif
  set_result(value);
  return true;
}
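// Editorial note: both time functions are inlined as RC_LEAF runtime calls
// (a leaf call takes no safepoint), and the NULL 'no_memory_effects'
// argument means the call neither consumes nor produces tracked memory
// state, so the intrinsic reduces to a plain C call plus a projection of
// its long result.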
//------------------------inline_native_currentThread------------------
bool LibraryCallKit::inline_native_currentThread() {
  Node* junk = NULL;
  set_result(generate_current_thread(junk));
  return true;
}
//------------------------inline_native_isInterrupted------------------
// private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
bool LibraryCallKit::inline_native_isInterrupted() {
  // Add a fast path to t.isInterrupted(clear_int):
  //   (t == Thread.current() &&
  //    (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
  //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
  // So, in the common case that the interrupt bit is false,
  // we avoid making a call into the VM.  Even if the interrupt bit
  // is true, if the clear_int argument is false, we avoid the VM call.
  // However, if the receiver is not currentThread, we must call the VM,
  // because there must be some locking done around the operation.

  // We only go to the fast case code if we pass two guards.
  // Paths which do not pass are accumulated in the slow_region.

  enum {
    no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
    no_clear_result_path = 2, // t == Thread.current() &&  TLS._osthread._interrupted && !clear_int
    slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
    PATH_LIMIT
  };

  // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
  // out of the function.
  insert_mem_bar(Op_MemBarCPUOrder);

  RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);

  RegionNode* slow_region = new (C) RegionNode(1);
  record_for_igvn(slow_region);

  // (a) Receiving thread must be the current thread.
  Node* rec_thr = argument(0);
  Node* tls_ptr = NULL;
  Node* cur_thr = generate_current_thread(tls_ptr);
  Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr));
  Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne));

  generate_slow_guard(bol_thr, slow_region);

  // (b) Interrupt bit on TLS must be false.
  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));

  // Set the control input on the field _interrupted read to prevent it floating up.
  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
  Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
  Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));

  IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);

  // First fast path:  if (!TLS._interrupted) return false;
  Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit));
  result_rgn->init_req(no_int_result_path, false_bit);
  result_val->init_req(no_int_result_path, intcon(0));

  // drop through to next case
  set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)));

#ifndef TARGET_OS_FAMILY_windows
  // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
  Node* clr_arg = argument(1);
  Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0)));
  Node* bol_arg = _gvn.transform(new (C) BoolNode(cmp_arg, BoolTest::ne));
  IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);

  // Second fast path:  ... else if (!clear_int) return true;
  Node* false_arg = _gvn.transform(new (C) IfFalseNode(iff_arg));
  result_rgn->init_req(no_clear_result_path, false_arg);
  result_val->init_req(no_clear_result_path, intcon(1));

  // drop through to next case
  set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)));
#else
  // To return true on Windows you must read the _interrupted field
  // and check the event state, i.e. take the slow path.
#endif // TARGET_OS_FAMILY_windows

  // (d) Otherwise, go to the slow path.
  slow_region->add_req(control());
  set_control( _gvn.transform(slow_region));

  if (stopped()) {
    // There is no slow path.
    result_rgn->init_req(slow_result_path, top());
    result_val->init_req(slow_result_path, top());
  } else {
    // non-virtual because it is a private non-static
    CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);

    Node* slow_val = set_results_for_java_call(slow_call);
    // this->control() comes from set_results_for_java_call

    Node* fast_io  = slow_call->in(TypeFunc::I_O);
    Node* fast_mem = slow_call->in(TypeFunc::Memory);

    // These two phis are pre-filled with copies of the fast IO and Memory
    PhiNode* result_mem = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
    PhiNode* result_io  = PhiNode::make(result_rgn, fast_io,  Type::ABIO);

    result_rgn->init_req(slow_result_path, control());
    result_io ->init_req(slow_result_path, i_o());
    result_mem->init_req(slow_result_path, reset_memory());
    result_val->init_req(slow_result_path, slow_val);

    set_all_memory(_gvn.transform(result_mem));
    set_i_o(       _gvn.transform(result_io));
  }

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(result_rgn, result_val);
  return true;
}
//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
}
//-----------------------load_klass_from_mirror_common-------------------------
// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
// and branch to the given path on the region.
// If never_see_null, take an uncommon trap on null, so we can optimistically
// compile for the non-null case.
// If the region is NULL, force never_see_null = true.
Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
                                                    bool never_see_null,
                                                    RegionNode* region,
                                                    int null_path,
                                                    int offset) {
  if (region == NULL)  never_see_null = true;
  Node* p = basic_plus_adr(mirror, offset);
  const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
  Node* null_ctl = top();
  kls = null_check_oop(kls, &null_ctl, never_see_null);
  if (region != NULL) {
    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
    region->init_req(null_path, null_ctl);
  } else {
    assert(null_ctl == top(), "no loose ends");
  }
  return kls;
}
//--------------------(inline_native_Class_query helpers)---------------------
// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
// Fall through if (mods & mask) == bits, take the guard otherwise.
Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
  // Branch around if the given klass has the given modifier bit set.
  // Like generate_guard, adds a new path onto the region.
  Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
  Node* mask = intcon(modifier_mask);
  Node* bits = intcon(modifier_bits);
  Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
  Node* cmp  = _gvn.transform(new (C) CmpINode(mbit, bits));
  Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
  return generate_fair_guard(bol, region);
}
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
}
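// Editorial example: generate_interface_guard(kls, region) falls through when
// (kls->access_flags & JVM_ACC_INTERFACE) == 0, i.e. for a concrete class or
// array, and takes the guard onto 'region' when the interface bit is set.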
//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
  const Type* return_type = TypeInt::BOOL;
  Node* prim_return_value = top();  // what happens if it's a primitive class?
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  bool expect_prim = false;     // most of these guys expect to work on refs

  enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };

  Node* mirror = argument(0);
  Node* obj    = top();

  switch (id) {
  case vmIntrinsics::_isInstance:
    // nothing is an instance of a primitive type
    prim_return_value = intcon(0);
    obj = argument(1);
    break;
  case vmIntrinsics::_getModifiers:
    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
    assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
    return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
    break;
  case vmIntrinsics::_isInterface:
    prim_return_value = intcon(0);
    break;
  case vmIntrinsics::_isArray:
    prim_return_value = intcon(0);
    expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
    break;
  case vmIntrinsics::_isPrimitive:
    prim_return_value = intcon(1);
    expect_prim = true;  // obviously
    break;
  case vmIntrinsics::_getSuperclass:
    prim_return_value = null();
    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
    break;
  case vmIntrinsics::_getComponentType:
    prim_return_value = null();
    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
    break;
  case vmIntrinsics::_getClassAccessFlags:
    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
    return_type = TypeInt::INT;  // not bool!  6297094
    break;
  default:
    fatal_unexpected_iid(id);
    break;
  }

  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
  if (mirror_con == NULL)  return false;  // cannot happen?

#ifndef PRODUCT
  if (C->print_intrinsics() || C->print_inlining()) {
    ciType* k = mirror_con->java_mirror_type();
    if (k) {
      tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
      k->print_name();
      tty->cr();
    }
  }
#endif

  // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
  record_for_igvn(region);
  PhiNode* phi = new (C) PhiNode(region, return_type);

  // The mirror will never be null for Reflection.getClassAccessFlags; however,
  // it may be null for Class.isInstance or Class.getModifiers.  Throw an NPE
  // if it is.  See bug 4774291.

  // For Reflection.getClassAccessFlags(), the null check occurs in
  // the wrong place; see inline_unsafe_access(), above, for a similar
  // situation.
  mirror = null_check(mirror);
  // If mirror or obj is dead, only null-path is taken.
  if (stopped())  return true;

  if (expect_prim)  never_see_null = false;  // expect nulls (meaning prims)

  // Now load the mirror's klass metaobject, and null-check it.
  // Side-effects region with the control path if the klass is null.
  Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
  // If kls is null, we have a primitive mirror.
  phi->init_req(_prim_path, prim_return_value);
  if (stopped()) { set_result(region, phi); return true; }
  bool safe_for_replace = (region->in(_prim_path) == top());

  Node* p;  // handy temp
  Node* null_ctl;

  // Now that we have the non-null klass, we can perform the real query.
  // For constant classes, the query will constant-fold in LoadNode::Value.
  Node* query_value = top();
  switch (id) {
  case vmIntrinsics::_isInstance:
    // nothing is an instance of a primitive type
    query_value = gen_instanceof(obj, kls, safe_for_replace);
    break;

  case vmIntrinsics::_getModifiers:
    p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
    break;

  case vmIntrinsics::_isInterface:
    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
    if (generate_interface_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an interface.
      phi->add_req(intcon(1));
    // If we fall through, it's a plain class.
    query_value = intcon(0);
    break;

  case vmIntrinsics::_isArray:
    // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
    if (generate_array_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an array.
      phi->add_req(intcon(1));
    // If we fall through, it's a plain class.
    query_value = intcon(0);
    break;

  case vmIntrinsics::_isPrimitive:
    query_value = intcon(0); // "normal" path produces false
    break;

  case vmIntrinsics::_getSuperclass:
    // The rules here are somewhat unfortunate, but we can still do better
    // with random logic than with a JNI call.
    // Interfaces store null or Object as _super, but must report null.
    // Arrays store an intermediate super as _super, but must report Object.
    // Other types can report the actual _super.
    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
    if (generate_interface_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an interface.
      phi->add_req(null());
    if (generate_array_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an array.
      phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
    // If we fall through, it's a plain class.  Get its _super.
    p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
    kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
    null_ctl = top();
    kls = null_check_oop(kls, &null_ctl);
    if (null_ctl != top()) {
      // If the guard is taken, Object.superClass is null (both klass and mirror).
      region->add_req(null_ctl);
      phi   ->add_req(null());
    }
    if (!stopped()) {
      query_value = load_mirror_from_klass(kls);
    }
    break;

  case vmIntrinsics::_getComponentType:
    if (generate_array_guard(kls, region) != NULL) {
      // Be sure to pin the oop load to the guard edge just created:
      Node* is_array_ctrl = region->in(region->req()-1);
      Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
      phi->add_req(cmo);
    }
    query_value = null();  // non-array case is null
    break;

  case vmIntrinsics::_getClassAccessFlags:
    p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
    break;

  default:
    fatal_unexpected_iid(id);
    break;
  }

  // Fall-through is the normal case of a query to a real class.
  phi->init_req(1, query_value);
  region->init_req(1, control());

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(region, phi);
  return true;
}
//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
bool LibraryCallKit::inline_native_subtype_check() {
  // Pull both arguments off the stack.
  Node* args[2];                // two java.lang.Class mirrors: superc, subc
  args[0] = argument(0);
  args[1] = argument(1);
  Node* klasses[2];             // corresponding Klasses: superk, subk
  klasses[0] = klasses[1] = top();

  enum {
    // A full decision tree on {superc is prim, subc is prim}:
    _prim_0_path = 1,           // {P,N} => false
                                // {P,P} & superc!=subc => false
    _prim_same_path,            // {P,P} & superc==subc => true
    _prim_1_path,               // {N,P} => false
    _ref_subtype_path,          // {N,N} & subtype check wins => true
    _both_ref_path,             // {N,N} & subtype check loses => false
    PATH_LIMIT
  };

  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
  Node*       phi    = new (C) PhiNode(region, TypeInt::BOOL);
  record_for_igvn(region);

  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
  const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
  int class_klass_offset = java_lang_Class::klass_offset_in_bytes();

  // First null-check both mirrors and load each mirror's klass metaobject.
  int which_arg;
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* arg = args[which_arg];
    arg = null_check(arg);
    if (stopped())  break;
    args[which_arg] = arg;

    Node* p = basic_plus_adr(arg, class_klass_offset);
    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
    klasses[which_arg] = _gvn.transform(kls);
  }

  // Having loaded both klasses, test each for null.
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* kls = klasses[which_arg];
    Node* null_ctl = top();
    kls = null_check_oop(kls, &null_ctl, never_see_null);
    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
    region->init_req(prim_path, null_ctl);
    if (stopped())  break;
    klasses[which_arg] = kls;
  }

  if (!stopped()) {
    // now we have two reference types, in klasses[0..1]
    Node* subk   = klasses[1];  // the argument to isAssignableFrom
    Node* superk = klasses[0];  // the receiver
    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
    // now we have a successful reference subtype check
    region->set_req(_ref_subtype_path, control());
  }

  // If both operands are primitive (both klasses null), then
  // we must return true when they are identical primitives.
  // It is convenient to test this after the first null klass check.
  set_control(region->in(_prim_0_path)); // go back to first null check
  if (!stopped()) {
    // Since superc is primitive, make a guard for the superc==subc case.
    Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
    Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
    generate_guard(bol_eq, region, PROB_FAIR);
    if (region->req() == PATH_LIMIT+1) {
      // A guard was added.  If the added guard is taken, superc==subc.
      region->swap_edges(PATH_LIMIT, _prim_same_path);
      region->del_req(PATH_LIMIT);
    }
    region->set_req(_prim_0_path, control()); // Not equal after all.
  }

  // these are the only paths that produce 'true':
  phi->set_req(_prim_same_path,   intcon(1));
  phi->set_req(_ref_subtype_path, intcon(1));

  // pull together the cases:
  assert(region->req() == PATH_LIMIT, "sane region");
  for (uint i = 1; i < region->req(); i++) {
    Node* ctl = region->in(i);
    if (ctl == NULL || ctl == top()) {
      region->set_req(i, top());
      phi   ->set_req(i, top());
    } else if (phi->in(i) == NULL) {
      phi->set_req(i, intcon(0)); // all other paths produce 'false'
    }
  }

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}
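// Editorial examples of the decision tree above (superc is the receiver,
// subc the argument of isAssignableFrom):
//   int.class.isAssignableFrom(int.class)        -> _prim_same_path   (true)
//   int.class.isAssignableFrom(Integer.class)    -> _prim_0_path      (false)
//   Number.class.isAssignableFrom(Integer.class) -> _ref_subtype_path (true)
//   String.class.isAssignableFrom(Number.class)  -> _both_ref_path    (false)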
//---------------------generate_array_guard_common------------------------
Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
                                                  bool obj_array, bool not_array) {
  // If obj_array/not_array==false/false:
  // Branch around if the given klass is in fact an array (either obj or prim).
  // If obj_array/not_array==false/true:
  // Branch around if the given klass is not an array klass of any kind.
  // If obj_array/not_array==true/true:
  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
  // If obj_array/not_array==true/false:
  // Branch around if the kls is an oop array (Object[] or subtype)
  //
  // Like generate_guard, adds a new path onto the region.
  jint  layout_con = 0;
  Node* layout_val = get_layout_helper(kls, layout_con);
  if (layout_val == NULL) {
    bool query = (obj_array
                  ? Klass::layout_helper_is_objArray(layout_con)
                  : Klass::layout_helper_is_array(layout_con));
    if (query == not_array) {
      return NULL;                       // never a branch
    } else {                             // always a branch
      Node* always_branch = control();
      if (region != NULL)
        region->add_req(always_branch);
      set_control(top());
      return always_branch;
    }
  }
  // Now test the correct condition.
  jint  nval = (obj_array
                ? (jint)(Klass::_lh_array_tag_type_value
                         <<    Klass::_lh_array_tag_shift)
                : Klass::_lh_neutral_value);
  Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval)));
  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
  // invert the test if we are looking for a non-array
  if (not_array)  btest = BoolTest(btest).negate();
  Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest));
  return generate_fair_guard(bol, region);
}
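// Editorial note: this encoding trick works because array klasses have
// negative layout helpers (the array tag occupies the high bits), while
// instance klasses have non-negative ones. A signed 'lt' compare against
// _lh_neutral_value therefore answers "is this an array at all", and 'lt'
// against the type-array tag value separates oop arrays from primitive
// arrays.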
//-----------------------inline_native_newArray--------------------------
// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
bool LibraryCallKit::inline_native_newArray() {
  Node* mirror    = argument(0);
  Node* count_val = argument(1);

  mirror = null_check(mirror);
  // If mirror or obj is dead, only null-path is taken.
  if (stopped())  return true;

  enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
  RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new(C) PhiNode(result_reg,
                                          TypeInstPtr::NOTNULL);
  PhiNode*    result_io  = new(C) PhiNode(result_reg, Type::ABIO);
  PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
                                          TypePtr::BOTTOM);

  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
                                                  result_reg, _slow_path);
  Node* normal_ctl   = control();
  Node* no_array_ctl = result_reg->in(_slow_path);

  // Generate code for the slow case.  We make a call to newArray().
  set_control(no_array_ctl);
  if (!stopped()) {
    // Either the input type is void.class, or else the
    // array klass has not yet been cached.  Either the
    // ensuing call will throw an exception, or else it
    // will cache the array klass for next time.
    PreserveJVMState pjvms(this);
    CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
    Node* slow_result = set_results_for_java_call(slow_call);
    // this->control() comes from set_results_for_java_call
    result_reg->set_req(_slow_path, control());
    result_val->set_req(_slow_path, slow_result);
    result_io ->set_req(_slow_path, i_o());
    result_mem->set_req(_slow_path, reset_memory());
  }

  set_control(normal_ctl);
  if (!stopped()) {
    // Normal case:  The array type has been cached in the java.lang.Class.
    // The following call works fine even if the array type is polymorphic.
    // It could be a dynamic mix of int[], boolean[], Object[], etc.
    Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
    result_reg->init_req(_normal_path, control());
    result_val->init_req(_normal_path, obj);
    result_io ->init_req(_normal_path, i_o());
    result_mem->init_req(_normal_path, reset_memory());
  }

  // Return the combined state.
  set_i_o(        _gvn.transform(result_io));
  set_all_memory( _gvn.transform(result_mem));

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(result_reg, result_val);
  return true;
}
//----------------------inline_native_getLength--------------------------
// public static native int java.lang.reflect.Array.getLength(Object array);
bool LibraryCallKit::inline_native_getLength() {
  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;

  Node* array = null_check(argument(0));
  // If array is dead, only null-path is taken.
  if (stopped())  return true;

  // Deoptimize if it is a non-array.
  Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);

  if (non_array != NULL) {
    PreserveJVMState pjvms(this);
    set_control(non_array);
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }

  // If control is dead, only non-array-path is taken.
  if (stopped())  return true;

  // This works fine even if the array type is polymorphic.
  // It could be a dynamic mix of int[], boolean[], Object[], etc.
  Node* result = load_array_length(array);

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(result);
  return true;
}
//------------------------inline_array_copyOf----------------------------
// public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
// public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;

  // Get the arguments.
  Node* original          = argument(0);
  Node* start             = is_copyOfRange? argument(1): intcon(0);
  Node* end               = is_copyOfRange? argument(2): argument(1);
  Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);

  Node* newcopy = NULL;

  // Set the original stack and the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes Arrays.copyOf if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    array_type_mirror = null_check(array_type_mirror);
    original          = null_check(original);

    // Check if a null path was taken unconditionally.
    if (stopped())  return true;

    Node* orig_length = load_array_length(original);

    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
    klass_node = null_check(klass_node);

    RegionNode* bailout = new (C) RegionNode(1);
    record_for_igvn(bailout);

    // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
    // Bail out if that is so.
    Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
    if (not_objArray != NULL) {
      // Improve the klass node's type from the new optimistic assumption:
      ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
      const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
      Node* cast = new (C) CastPPNode(klass_node, akls);
      cast->init_req(0, control());
      klass_node = _gvn.transform(cast);
    }

    // Bail out if either start or end is negative.
    generate_negative_guard(start, bailout, &start);
    generate_negative_guard(end,   bailout, &end);

    Node* length = end;
    if (_gvn.type(start) != TypeInt::ZERO) {
      length = _gvn.transform(new (C) SubINode(end, start));
    }

    // Bail out if length is negative.
    // Without this the new_array would throw
    // NegativeArraySizeException but IllegalArgumentException is what
    // should be thrown
    generate_negative_guard(length, bailout, &length);

    if (bailout->req() > 1) {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(bailout));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }

    if (!stopped()) {
      // How many elements will we copy from the original?
      // The answer is MinI(orig_length - start, length).
      Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
      Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);

      newcopy = new_array(klass_node, length, 0);  // no arguments to push

      // Generate a direct call to the right arraycopy function(s).
      // We know the copy is disjoint but we might not know if the
      // oop stores need checking.
      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
      // This will fail a store-check if x contains any non-nulls.
      bool disjoint_bases = true;
      // if start > orig_length then the length of the copy may be
      // negative.
      bool length_never_negative = !is_copyOfRange;
      generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
                         original, start, newcopy, intcon(0), moved,
                         disjoint_bases, length_never_negative);
    }
  } // original reexecute is set back here

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  if (!stopped()) {
    set_result(newcopy);
  }
  return true;
}
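// Editorial example: for copyOfRange(a, 2, 9, T[].class) with a.length == 5,
// length = 9 - 2 = 7 and moved = min(5 - 2, 7) = 3, so three elements are
// copied and the remaining four slots keep the default null that new_array's
// zeroed allocation gives them.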
//----------------------generate_virtual_guard---------------------------
// Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
                                             RegionNode* slow_region) {
  ciMethod* method = callee();
  int vtable_index = method->vtable_index();
  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
         err_msg_res("bad index %d", vtable_index));
  // Get the Method* out of the appropriate vtable entry.
  int entry_offset  = (InstanceKlass::vtable_start_offset() +
                       vtable_index*vtableEntry::size()) * wordSize +
                      vtableEntry::method_offset_in_bytes();
  Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);

  // Compare the target method with the expected method (e.g., Object.hashCode).
  const TypePtr* native_call_addr = TypeMetadataPtr::make(method);

  Node* native_call = makecon(native_call_addr);
  Node* chk_native  = _gvn.transform(new(C) CmpPNode(target_call, native_call));
  Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne));

  return generate_slow_guard(test_native, slow_region);
}
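// Editorial note: the guard above sends control to 'slow_region' whenever the
// receiver's vtable slot no longer holds the expected method, i.e. when a
// subclass has overridden hashCode or clone; only receivers still using the
// default Object implementation stay on the inlined fast path.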
//-----------------------generate_method_call----------------------------
4077
// Use generate_method_call to make a slow-call to the real
4078
// method if the fast path fails. An alternative would be to
4079
// use a stub like OptoRuntime::slow_arraycopy_Java.
4080
// This only works for expanding the current library call,
4081
// not another intrinsic. (E.g., don't use this for making an
4082
// arraycopy call inside of the copyOf intrinsic.)
4083
CallJavaNode*
4084
LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
4085
// When compiling the intrinsic method itself, do not use this technique.
4086
guarantee(callee() != C->method(), "cannot make slow-call to self");
4087
4088
ciMethod* method = callee();
4089
// ensure the JVMS we have will be correct for this call
4090
guarantee(method_id == method->intrinsic_id(), "must match");
4091
4092
const TypeFunc* tf = TypeFunc::make(method);
4093
CallJavaNode* slow_call;
4094
if (is_static) {
4095
assert(!is_virtual, "");
4096
slow_call = new(C) CallStaticJavaNode(C, tf,
4097
SharedRuntime::get_resolve_static_call_stub(),
4098
method, bci());
4099
} else if (is_virtual) {
4100
null_check_receiver();
4101
int vtable_index = Method::invalid_vtable_index;
4102
if (UseInlineCaches) {
4103
// Suppress the vtable call
4104
} else {
4105
// hashCode and clone are not a miranda methods,
4106
// so the vtable index is fixed.
4107
// No need to use the linkResolver to get it.
4108
vtable_index = method->vtable_index();
4109
assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4110
err_msg_res("bad index %d", vtable_index));
4111
}
4112
slow_call = new(C) CallDynamicJavaNode(tf,
4113
SharedRuntime::get_resolve_virtual_call_stub(),
4114
method, vtable_index, bci());
4115
} else { // neither virtual nor static: opt_virtual
4116
null_check_receiver();
4117
slow_call = new(C) CallStaticJavaNode(C, tf,
4118
SharedRuntime::get_resolve_opt_virtual_call_stub(),
4119
method, bci());
4120
slow_call->set_optimized_virtual(true);
4121
}
4122
set_arguments_for_java_call(slow_call);
4123
set_edges_for_java_call(slow_call);
4124
return slow_call;
4125
}
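
// For reference, the dispatch chosen above maps as follows:
//   is_static               -> CallStaticJavaNode  + resolve_static_call stub
//   is_virtual              -> CallDynamicJavaNode + resolve_virtual_call stub
//   otherwise (opt_virtual) -> CallStaticJavaNode  + resolve_opt_virtual_call stub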


/**
 * Build special case code for calls to hashCode on an object. This call may
 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
 * slightly different code.
 */
bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
  assert(is_static == callee()->is_static(), "correct intrinsic selection");
  assert(!(is_virtual && is_static), "either virtual, special, or static");

  enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };

  RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new(C) PhiNode(result_reg, TypeInt::INT);
  PhiNode*    result_io  = new(C) PhiNode(result_reg, Type::ABIO);
  PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
  Node* obj = NULL;
  if (!is_static) {
    // Check for hashing null object
    obj = null_check_receiver();
    if (stopped())  return true;        // unconditionally null
    result_reg->init_req(_null_path, top());
    result_val->init_req(_null_path, top());
  } else {
    // Do a null check, and return zero if null.
    // System.identityHashCode(null) == 0
    obj = argument(0);
    Node* null_ctl = top();
    obj = null_check_oop(obj, &null_ctl);
    result_reg->init_req(_null_path, null_ctl);
    result_val->init_req(_null_path, _gvn.intcon(0));
  }

  // Unconditionally null?  Then return right away.
  if (stopped()) {
    set_control( result_reg->in(_null_path));
    if (!stopped())
      set_result(result_val->in(_null_path));
    return true;
  }

  // We only go to the fast case code if we pass a number of guards.  The
  // paths which do not pass are accumulated in the slow_region.
  RegionNode* slow_region = new (C) RegionNode(1);
  record_for_igvn(slow_region);

  // If this is a virtual call, we generate a funny guard.  We pull out
  // the vtable entry corresponding to hashCode() from the target object.
  // If the target method which we are calling happens to be the native
  // Object hashCode() method, we pass the guard.  We do not need this
  // guard for non-virtual calls -- the caller is known to be the native
  // Object hashCode().
  if (is_virtual) {
    // After null check, get the object's klass.
    Node* obj_klass = load_object_klass(obj);
    generate_virtual_guard(obj_klass, slow_region);
  }

  // Get the header out of the object, use LoadMarkNode when available
  Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  // The control of the load must be NULL. Otherwise, the load can move before
  // the null check after castPP removal.
  Node* no_ctrl = NULL;
  Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);

  // Test the header to see if it is unlocked.
  Node* lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
  Node* lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
  Node* unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
  Node* chk_unlocked   = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val));
  Node* test_unlocked  = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne));

  generate_slow_guard(test_unlocked, slow_region);

  // Get the hash value and check to see that it has been properly assigned.
  // We depend on hash_mask being at most 32 bits and avoid the use of
  // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
  // vm: see markOop.hpp.
  Node* hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
  Node* hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
  Node* hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift));
  // This hack lets the hash bits live anywhere in the mark object now, as long
  // as the shift drops the relevant bits into the low 32 bits.  Note that
  // Java spec says that HashCode is an int so there's no point in capturing
  // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
  hshifted_header      = ConvX2I(hshifted_header);
  Node* hash_val       = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask));

  Node* no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
  Node* chk_assigned   = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val));
  Node* test_assigned  = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq));

  generate_slow_guard(test_assigned, slow_region);

  Node* init_mem = reset_memory();
  // fill in the rest of the null path:
  result_io ->init_req(_null_path, i_o());
  result_mem->init_req(_null_path, init_mem);

  result_val->init_req(_fast_path, hash_val);
  result_reg->init_req(_fast_path, control());
  result_io ->init_req(_fast_path, i_o());
  result_mem->init_req(_fast_path, init_mem);

  // Generate code for the slow case.  We make a call to hashCode().
  set_control(_gvn.transform(slow_region));
  if (!stopped()) {
    // No need for PreserveJVMState, because we're using up the present state.
    set_all_memory(init_mem);
    vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
    CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
    Node* slow_result = set_results_for_java_call(slow_call);
    // this->control() comes from set_results_for_java_call
    result_reg->init_req(_slow_path, control());
    result_val->init_req(_slow_path, slow_result);
    result_io  ->set_req(_slow_path, i_o());
    result_mem ->set_req(_slow_path, reset_memory());
  }

  // Return the combined state.
  set_i_o(        _gvn.transform(result_io)  );
  set_all_memory( _gvn.transform(result_mem));

  set_result(result_reg, result_val);
  return true;
}
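
// Illustrative sketch (pseudo-code, not compiled): the fast path above
// implements, in IR form, roughly this check on the mark word:
//
//   intptr_t mark = obj->mark();
//   if ((mark & biased_lock_mask_in_place) != unlocked_value)
//     goto slow;                                  // locked or biased header
//   int hash = ((int)(mark >> hash_shift)) & hash_mask;
//   if (hash == no_hash)
//     goto slow;                                  // identity hash not assigned yet
//   return hash;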

//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
  Node* obj = null_check_receiver();
  if (stopped())  return true;
  set_result(load_mirror_from_klass(load_object_klass(obj)));
  return true;
}

//-----------------inline_native_Reflection_getCallerClass---------------------
// public static native Class<?> sun.reflect.Reflection.getCallerClass();
//
// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
//
// NOTE: This code must perform the same logic as JVM_GetCallerClass
// in that it must skip particular security frames and checks for
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
  }
#endif

  if (!jvms()->has_method()) {
#ifndef PRODUCT
    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
      tty->print_cr("  Bailing out because intrinsic was inlined at top level");
    }
#endif
    return false;
  }

  // Walk back up the JVM state to find the caller at the required
  // depth.
  JVMState* caller_jvms = jvms();

  // Cf. JVM_GetCallerClass
  // NOTE: Start the loop at depth 1 because the current JVM state does
  // not include the Reflection.getCallerClass() frame.
  for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
    ciMethod* m = caller_jvms->method();
    switch (n) {
    case 0:
      fatal("current JVM state does not include the Reflection.getCallerClass frame");
      break;
    case 1:
      // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
      if (!m->caller_sensitive()) {
#ifndef PRODUCT
        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
          tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
        }
#endif
        return false;  // bail-out; let JVM_GetCallerClass do the work
      }
      break;
    default:
      if (!m->is_ignored_by_security_stack_walk()) {
        // We have reached the desired frame; return the holder class.
        // Acquire method holder as java.lang.Class and push as constant.
        ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
        ciInstance* caller_mirror = caller_klass->java_mirror();
        set_result(makecon(TypeInstPtr::make(caller_mirror)));

#ifndef PRODUCT
        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
          tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
          tty->print_cr("  JVM state at this point:");
          for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
            ciMethod* m = jvms()->of_depth(i)->method();
            tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
          }
        }
#endif
        return true;
      }
      break;
    }
  }

#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
    tty->print_cr("  JVM state at this point:");
    for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
      ciMethod* m = jvms()->of_depth(i)->method();
      tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
    }
  }
#endif

  return false;  // bail-out; let JVM_GetCallerClass do the work
}
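
// Worked example (illustrative): for a call chain
//   A.f()  ->  MethodHandles.lookup()  ->  Reflection.getCallerClass()
// the walk above sees n=1: lookup(), which must carry @CallerSensitive, and
// n=2: A.f(), whose holder class A is returned -- matching what
// JVM_GetCallerClass would compute at runtime.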

bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
  Node* arg = argument(0);
  Node* result = NULL;

  switch (id) {
  case vmIntrinsics::_floatToRawIntBits:    result = new (C) MoveF2INode(arg);  break;
  case vmIntrinsics::_intBitsToFloat:       result = new (C) MoveI2FNode(arg);  break;
  case vmIntrinsics::_doubleToRawLongBits:  result = new (C) MoveD2LNode(arg);  break;
  case vmIntrinsics::_longBitsToDouble:     result = new (C) MoveL2DNode(arg);  break;

  case vmIntrinsics::_doubleToLongBits: {
    // two paths (plus control) merge in a wood
    RegionNode *r = new (C) RegionNode(3);
    Node *phi = new (C) PhiNode(r, TypeLong::LONG);

    Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
    // Build the boolean node
    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));

    // Branch either way.
    // NaN case is less traveled, which makes all the difference.
    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
    Node *opt_isnan = _gvn.transform(ifisnan);
    assert( opt_isnan->is_If(), "Expect an IfNode");
    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
    Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));

    set_control(iftrue);

    static const jlong nan_bits = CONST64(0x7ff8000000000000);
    Node *slow_result = longcon(nan_bits); // return NaN
    phi->init_req(1, _gvn.transform( slow_result ));
    r->init_req(1, iftrue);

    // Else fall through
    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
    set_control(iffalse);

    phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg)));
    r->init_req(2, iffalse);

    // Post merge
    set_control(_gvn.transform(r));
    record_for_igvn(r);

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    result = phi;
    assert(result->bottom_type()->isa_long(), "must be");
    break;
  }

  case vmIntrinsics::_floatToIntBits: {
    // two paths (plus control) merge in a wood
    RegionNode *r = new (C) RegionNode(3);
    Node *phi = new (C) PhiNode(r, TypeInt::INT);

    Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg));
    // Build the boolean node
    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));

    // Branch either way.
    // NaN case is less traveled, which makes all the difference.
    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
    Node *opt_isnan = _gvn.transform(ifisnan);
    assert( opt_isnan->is_If(), "Expect an IfNode");
    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
    Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));

    set_control(iftrue);

    static const jint nan_bits = 0x7fc00000;
    Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
    phi->init_req(1, _gvn.transform( slow_result ));
    r->init_req(1, iftrue);

    // Else fall through
    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
    set_control(iffalse);

    phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg)));
    r->init_req(2, iffalse);

    // Post merge
    set_control(_gvn.transform(r));
    record_for_igvn(r);

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    result = phi;
    assert(result->bottom_type()->isa_int(), "must be");
    break;
  }

  default:
    fatal_unexpected_iid(id);
    break;
  }
  set_result(_gvn.transform(result));
  return true;
}
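
// For reference (Java-level semantics mirrored above): the non-raw
// conversions collapse every NaN to the canonical bit pattern, e.g.
//   Double.doubleToLongBits(0.0d / 0.0d) == 0x7ff8000000000000L
//   Float.floatToIntBits(0.0f / 0.0f)    == 0x7fc00000
// while the raw variants (a bare MoveD2L/MoveF2I) preserve arbitrary NaN bits.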

#ifdef _LP64
#define XTOP ,top() /*additional argument*/
#else  //_LP64
#define XTOP        /*no additional argument*/
#endif //_LP64

//----------------------inline_unsafe_copyMemory-------------------------
// public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
bool LibraryCallKit::inline_unsafe_copyMemory() {
  if (callee()->is_static())  return false;  // caller must have the capability!
  null_check_receiver();  // null-check receiver
  if (stopped())  return true;

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  Node* src_ptr =         argument(1);   // type: oop
  Node* src_off = ConvL2X(argument(2));  // type: long
  Node* dst_ptr =         argument(4);   // type: oop
  Node* dst_off = ConvL2X(argument(5));  // type: long
  Node* size    = ConvL2X(argument(7));  // type: long

  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
         "fieldOffset must be byte-scaled");

  Node* src = make_unsafe_address(src_ptr, src_off);
  Node* dst = make_unsafe_address(dst_ptr, dst_off);

  // Conservatively insert a memory barrier on all memory slices.
  // Do not let writes of the copy source or destination float below the copy.
  insert_mem_bar(Op_MemBarCPUOrder);

  // Call it.  Note that the length argument is not scaled.
  make_runtime_call(RC_LEAF|RC_NO_FP,
                    OptoRuntime::fast_arraycopy_Type(),
                    StubRoutines::unsafe_arraycopy(),
                    "unsafe_arraycopy",
                    TypeRawPtr::BOTTOM,
                    src, dst, size XTOP);

  // Do not let reads of the copy destination float above the copy.
  insert_mem_bar(Op_MemBarCPUOrder);

  return true;
}
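
// Note on the argument indices above (standard JVM calling convention):
// long arguments occupy two argument slots, so the signature
// (Object, long, Object, long, long) yields the receiver at slot 0,
// srcBase at 1, srcOffset at 2-3, destBase at 4, destOffset at 5-6 and
// bytes at 7-8 -- hence argument(1), (2), (4), (5), (7).  XTOP likewise
// supplies the second (top) half of a pointer-sized long argument on
// LP64 platforms.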

//------------------------copy_to_clone-----------------------------------
// Helper function for inline_native_clone.
void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
  assert(obj_size != NULL, "");
  Node* raw_obj = alloc_obj->in(1);
  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");

  AllocateNode* alloc = NULL;
  if (ReduceBulkZeroing) {
    // We will be completely responsible for initializing this object -
    // mark Initialize node as complete.
    alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
    // The object was just allocated - there should not be any stores!
    guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
    // Mark as complete_with_arraycopy so that on AllocateNode
    // expansion, we know this AllocateNode is initialized by an array
    // copy and a StoreStore barrier exists after the array copy.
    alloc->initialization()->set_complete_with_arraycopy();
  }

  // Copy the fastest available way.
  // TODO: generate fields copies for small objects instead.
  Node* src  = obj;
  Node* dest = alloc_obj;
  Node* size = _gvn.transform(obj_size);

  // Exclude the header but include array length to copy by 8-byte words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude length to copy by 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include klass to copy by 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8-byte alignment");
  }
  src  = basic_plus_adr(src,  base_off);
  dest = basic_plus_adr(dest, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
  countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  bool disjoint_bases = true;
  generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
                               src, NULL, dest, NULL, countx,
                               /*dest_uninitialized*/true);

  // If necessary, emit some card marks afterwards.  (Non-arrays only.)
  if (card_mark) {
    assert(!is_array, "");
    // Put in store barrier for any and all oops we are sticking
    // into this object.  (We could avoid this if we could prove
    // that the object type contains no oop fields at all.)
    Node* no_particular_value = NULL;
    Node* no_particular_field = NULL;
    int raw_adr_idx = Compile::AliasIdxRaw;
    post_barrier(control(),
                 memory(raw_adr_type),
                 alloc_obj,
                 no_particular_field,
                 raw_adr_idx,
                 no_particular_value,
                 T_OBJECT,
                 false);
  }

  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != NULL) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    // Record what AllocateNode this StoreStore protects so that
    // escape analysis can go from the MemBarStoreStoreNode to the
    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
    // based on the escape status of the AllocateNode.
    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
  } else {
    insert_mem_bar(Op_MemBarCPUOrder);
  }
}
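
// Worked example (illustrative): on a 64-bit VM with compressed class
// pointers cloning a plain instance, base_off starts at 12, is not 8-byte
// aligned, and is moved back to klass_offset_in_bytes() so the klass word
// is simply re-copied.  The word count is then
//   countx = (obj_size_in_bytes - base_off) >> LogBytesPerLong
// i.e. the payload is transferred in 8-byte blocks by the T_LONG stub.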

//------------------------inline_native_clone----------------------------
// protected native Object java.lang.Object.clone();
//
// Here are the simple edge cases:
//  null receiver => normal trap
//  virtual and clone was overridden => slow path to out-of-line clone
//  not cloneable or finalizer => slow path to out-of-line Object.clone
//
// The general case has two steps, allocation and copying.
// Allocation has two cases, and uses GraphKit::new_instance or new_array.
//
// Copying also has two cases, oop arrays and everything else.
// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
// Everything else uses the tight inline loop supplied by CopyArrayNode.
//
// These steps fold up nicely if and when the cloned object's klass
// can be sharply typed as an object array, a type array, or an instance.
//
bool LibraryCallKit::inline_native_clone(bool is_virtual) {
  PhiNode* result_val;

  // Set the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes Object.clone if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    Node* obj = null_check_receiver();
    if (stopped())  return true;

    Node* obj_klass = load_object_klass(obj);
    const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
    const TypeOopPtr*   toop   = ((tklass != NULL)
                                ? tklass->as_instance_type()
                                : TypeInstPtr::NOTNULL);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
    insert_mem_bar(Op_MemBarCPUOrder);

    // paths into result_reg:
    enum {
      _slow_path = 1,     // out-of-line call to clone method (virtual or not)
      _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
      _array_path,        // plain array allocation, plus arrayof_long_arraycopy
      _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
      PATH_LIMIT
    };
    RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
    result_val             = new(C) PhiNode(result_reg,
                                            TypeInstPtr::NOTNULL);
    PhiNode*    result_i_o = new(C) PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
                                            TypePtr::BOTTOM);
    record_for_igvn(result_reg);

    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
    int raw_adr_idx = Compile::AliasIdxRaw;

    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
    if (array_ctl != NULL) {
      // It's an array.
      PreserveJVMState pjvms(this);
      set_control(array_ctl);
      Node* obj_length = load_array_length(obj);
      Node* obj_size  = NULL;
      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push

      if (!use_ReduceInitialCardMarks()) {
        // If it is an oop array, it requires very special treatment,
        // because card marking is required on each card of the array.
        Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
        if (is_obja != NULL) {
          PreserveJVMState pjvms2(this);
          set_control(is_obja);
          // Generate a direct call to the right arraycopy function(s).
          bool disjoint_bases = true;
          bool length_never_negative = true;
          generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
                             obj, intcon(0), alloc_obj, intcon(0),
                             obj_length,
                             disjoint_bases, length_never_negative);
          result_reg->init_req(_objArray_path, control());
          result_val->init_req(_objArray_path, alloc_obj);
          result_i_o ->set_req(_objArray_path, i_o());
          result_mem ->set_req(_objArray_path, reset_memory());
        }
      }
      // Otherwise, there are no card marks to worry about.
      // (We can dispense with card marks if we know the allocation
      //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
      //  causes the non-eden paths to take compensating steps to
      //  simulate a fresh allocation, so that no further
      //  card marks are required in compiled code to initialize
      //  the object.)

      if (!stopped()) {
        copy_to_clone(obj, alloc_obj, obj_size, true, false);

        // Present the results of the copy.
        result_reg->init_req(_array_path, control());
        result_val->init_req(_array_path, alloc_obj);
        result_i_o ->set_req(_array_path, i_o());
        result_mem ->set_req(_array_path, reset_memory());
      }
    }

    // We only go to the instance fast case code if we pass a number of guards.
    // The paths which do not pass are accumulated in the slow_region.
    RegionNode* slow_region = new (C) RegionNode(1);
    record_for_igvn(slow_region);
    if (!stopped()) {
      // It's an instance (we did array above).  Make the slow-path tests.
      // If this is a virtual call, we generate a funny guard.  We grab
      // the vtable entry corresponding to clone() from the target object.
      // If the target method which we are calling happens to be the
      // Object clone() method, we pass the guard.  We do not need this
      // guard for non-virtual calls; the caller is known to be the native
      // Object clone().
      if (is_virtual) {
        generate_virtual_guard(obj_klass, slow_region);
      }

      // The object must be cloneable and must not have a finalizer.
      // Both of these conditions may be checked in a single test.
      // We could optimize the cloneable test further, but we don't care.
      generate_access_flags_guard(obj_klass,
                                  // Test both conditions:
                                  JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
                                  // Must be cloneable but not finalizer:
                                  JVM_ACC_IS_CLONEABLE,
                                  slow_region);
    }

    if (!stopped()) {
      // It's an instance, and it passed the slow-path tests.
      PreserveJVMState pjvms(this);
      Node* obj_size  = NULL;
      // Need to deoptimize on exception from allocation since Object.clone intrinsic
      // is reexecuted if deoptimization occurs and there could be problems when merging
      // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
      Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);

      copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());

      // Present the results of the slow call.
      result_reg->init_req(_instance_path, control());
      result_val->init_req(_instance_path, alloc_obj);
      result_i_o ->set_req(_instance_path, i_o());
      result_mem ->set_req(_instance_path, reset_memory());
    }

    // Generate code for the slow case.  We make a call to clone().
    set_control(_gvn.transform(slow_region));
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
      Node* slow_result = set_results_for_java_call(slow_call);
      // this->control() comes from set_results_for_java_call
      result_reg->init_req(_slow_path, control());
      result_val->init_req(_slow_path, slow_result);
      result_i_o ->set_req(_slow_path, i_o());
      result_mem ->set_req(_slow_path, reset_memory());
    }

    // Return the combined state.
    set_control(    _gvn.transform(result_reg));
    set_i_o(        _gvn.transform(result_i_o));
    set_all_memory( _gvn.transform(result_mem));
  } // original reexecute is set back here

  set_result(_gvn.transform(result_val));
  return true;
}

//------------------------------basictype2arraycopy----------------------------
address LibraryCallKit::basictype2arraycopy(BasicType t,
                                            Node* src_offset,
                                            Node* dest_offset,
                                            bool disjoint_bases,
                                            const char* &name,
                                            bool dest_uninitialized) {
  const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);
  const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);

  bool aligned = false;
  bool disjoint = disjoint_bases;

  // if the offsets are the same, we can treat the memory regions as
  // disjoint, because either the memory regions are in different arrays,
  // or they are identical (which we can treat as disjoint.)  We can also
  // treat a copy with a destination index less than the source index
  // as disjoint since a low->high copy will work correctly in this case.
  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
    // both indices are constants
    int s_offs = src_offset_inttype->get_con();
    int d_offs = dest_offset_inttype->get_con();
    int element_size = type2aelembytes(t);
    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
    if (s_offs >= d_offs)  disjoint = true;
  } else if (src_offset == dest_offset && src_offset != NULL) {
    // This can occur if the offsets are identical non-constants.
    disjoint = true;
  }

  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
}
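
// Example (illustrative): System.arraycopy(a, 4, a, 0, n) on the same array
// has d_offs (0) <= s_offs (4), so a forward (low->high) copy is safe and
// the regions may be treated as disjoint; the opposite direction must fall
// back to a conjoint stub that copies high->low.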


//------------------------------inline_arraycopy-----------------------
// public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
//                                                      Object dest, int destPos,
//                                                      int length);
bool LibraryCallKit::inline_arraycopy() {
  // Get the arguments.
  Node* src         = argument(0);  // type: oop
  Node* src_offset  = argument(1);  // type: int
  Node* dest        = argument(2);  // type: oop
  Node* dest_offset = argument(3);  // type: int
  Node* length      = argument(4);  // type: int

  // Compile time checks.  If any of these checks cannot be verified at compile time,
  // we do not make a fast path for this call.  Instead, we let the call remain as it
  // is.  The checks we choose to mandate at compile time are:
  //
  // (1) src and dest are arrays.
  const Type* src_type  = src->Value(&_gvn);
  const Type* dest_type = dest->Value(&_gvn);
  const TypeAryPtr* top_src  = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  // Do we have the type of src?
  bool has_src = (top_src != NULL && top_src->klass() != NULL);
  // Do we have the type of dest?
  bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
  // Is the type for src from speculation?
  bool src_spec = false;
  // Is the type for dest from speculation?
  bool dest_spec = false;

  if (!has_src || !has_dest) {
    // We don't have sufficient type information, let's see if
    // speculative types can help. We need to have types for both src
    // and dest so that it pays off.

    // Do we already have or could we have type information for src
    bool could_have_src = has_src;
    // Do we already have or could we have type information for dest
    bool could_have_dest = has_dest;

    ciKlass* src_k = NULL;
    if (!has_src) {
      src_k = src_type->speculative_type();
      if (src_k != NULL && src_k->is_array_klass()) {
        could_have_src = true;
      }
    }

    ciKlass* dest_k = NULL;
    if (!has_dest) {
      dest_k = dest_type->speculative_type();
      if (dest_k != NULL && dest_k->is_array_klass()) {
        could_have_dest = true;
      }
    }

    if (could_have_src && could_have_dest) {
      // This is going to pay off so emit the required guards
      if (!has_src) {
        src = maybe_cast_profiled_obj(src, src_k);
        src_type  = _gvn.type(src);
        top_src  = src_type->isa_aryptr();
        has_src = (top_src != NULL && top_src->klass() != NULL);
        src_spec = true;
      }
      if (!has_dest) {
        dest = maybe_cast_profiled_obj(dest, dest_k);
        dest_type  = _gvn.type(dest);
        top_dest  = dest_type->isa_aryptr();
        has_dest = (top_dest != NULL && top_dest->klass() != NULL);
        dest_spec = true;
      }
    }
  }

  if (!has_src || !has_dest) {
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the source float below the arraycopy.
    insert_mem_bar(Op_MemBarCPUOrder);

    // Call StubRoutines::generic_arraycopy stub.
    generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
                       src, src_offset, dest, dest_offset, length);

    // Do not let reads from the destination float above the arraycopy.
    // Since we cannot type the arrays, we don't know which slices
    // might be affected.  We could restrict this barrier only to those
    // memory slices which pertain to array elements--but don't bother.
    if (!InsertMemBarAfterArraycopy)
      // (If InsertMemBarAfterArraycopy, there is already one in place.)
      insert_mem_bar(Op_MemBarCPUOrder);
    return true;
  }

  // (2) src and dest arrays must have elements of the same BasicType
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem  = top_src->klass()->as_array_klass()->element_type()->basic_type();
  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;

  if (src_elem != dest_elem || dest_elem == T_VOID) {
    // The component types are not the same or are not recognized.  Punt.
    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
    generate_slow_arraycopy(TypePtr::BOTTOM,
                            src, src_offset, dest, dest_offset, length,
                            /*dest_uninitialized*/false);
    return true;
  }

  if (src_elem == T_OBJECT) {
    // If both arrays are object arrays then having the exact types
    // for both will remove the need for a subtype check at runtime
    // before the call and may make it possible to pick a faster copy
    // routine (without a subtype check on every element)
    // Do we have the exact type of src?
    bool could_have_src = src_spec;
    // Do we have the exact type of dest?
    bool could_have_dest = dest_spec;
    ciKlass* src_k = top_src->klass();
    ciKlass* dest_k = top_dest->klass();
    if (!src_spec) {
      src_k = src_type->speculative_type();
      if (src_k != NULL && src_k->is_array_klass()) {
        could_have_src = true;
      }
    }
    if (!dest_spec) {
      dest_k = dest_type->speculative_type();
      if (dest_k != NULL && dest_k->is_array_klass()) {
        could_have_dest = true;
      }
    }
    if (could_have_src && could_have_dest) {
      // If we can have both exact types, emit the missing guards
      if (could_have_src && !src_spec) {
        src = maybe_cast_profiled_obj(src, src_k);
      }
      if (could_have_dest && !dest_spec) {
        dest = maybe_cast_profiled_obj(dest, dest_k);
      }
    }
  }

  //---------------------------------------------------------------------------
  // We will make a fast path for this call to arraycopy.

  // We have the following tests left to perform:
  //
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable

  RegionNode* slow_region = new (C) RegionNode(1);
  record_for_igvn(slow_region);

  // (3) operands must not be null
  // We currently perform our null checks with the null_check routine.
  // This means that the null exceptions will be reported in the caller
  // rather than (correctly) reported inside of the native arraycopy call.
  // This should be corrected, given time.  We do our null check with the
  // stack pointer restored.
  src  = null_check(src,  T_ARRAY);
  dest = null_check(dest, T_ARRAY);

  // (4) src_offset must not be negative.
  generate_negative_guard(src_offset, slow_region);

  // (5) dest_offset must not be negative.
  generate_negative_guard(dest_offset, slow_region);

  // (6) length must not be negative (moved to generate_arraycopy()).
  // generate_negative_guard(length, slow_region);

  // (7) src_offset + length must not exceed length of src.
  generate_limit_guard(src_offset, length,
                       load_array_length(src),
                       slow_region);

  // (8) dest_offset + length must not exceed length of dest.
  generate_limit_guard(dest_offset, length,
                       load_array_length(dest),
                       slow_region);

  // (9) each element of an oop array must be assignable
  // The generate_arraycopy subroutine checks this.

  // This is where the memory effects are placed:
  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
  generate_arraycopy(adr_type, dest_elem,
                     src, src_offset, dest, dest_offset, length,
                     false, false, slow_region);

  return true;
}
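
// Illustrative note on checks (7) and (8): the limit guards must also reject
// sums that overflow int.  E.g. src_offset = 1 with length =
// Integer.MAX_VALUE wraps negative as a signed int, yet must still be routed
// to slow_region; generate_limit_guard is expected to handle this case.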

//-----------------------------generate_arraycopy----------------------
// Generate an optimized call to arraycopy.
// Caller must guard against non-arrays.
// Caller must determine a common array basic-type for both arrays.
// Caller must validate offsets against array bounds.
// The slow_region has already collected guard failure paths
// (such as out of bounds length or non-conformable array types).
// The generated code has this shape, in general:
//
//     if (length == 0)  return   // via zero_path
//     slowval = -1
//     if (types unknown) {
//       slowval = call generic copy loop
//       if (slowval == 0)  return  // via checked_path
//     } else if (indexes in bounds) {
//       if ((is object array) && !(array type check)) {
//         slowval = call checked copy loop
//         if (slowval == 0)  return  // via checked_path
//       } else {
//         call bulk copy loop
//         return  // via fast_path
//       }
//     }
//     // adjust params for remaining work:
//     if (slowval != -1) {
//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
//     }
//   slow_region:
//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
//     return  // via slow_call_path
//
// This routine is used from several intrinsics:  System.arraycopy,
// Object.clone (the array subcase), and Arrays.copyOf[Range].
//
void
LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
                                   BasicType basic_elem_type,
                                   Node* src,  Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length,
                                   bool disjoint_bases,
                                   bool length_never_negative,
                                   RegionNode* slow_region) {

  if (slow_region == NULL) {
    slow_region = new(C) RegionNode(1);
    record_for_igvn(slow_region);
  }

  Node* original_dest      = dest;
  AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
  bool  dest_uninitialized = false;

  // See if this is the initialization of a newly-allocated array.
  // If so, we will take responsibility here for initializing it to zero.
  // (Note:  Because tightly_coupled_allocation performs checks on the
  // out-edges of the dest, we need to avoid making derived pointers
  // from it until we have checked its uses.)
  if (ReduceBulkZeroing
      && !ZeroTLAB              // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !src->eqv_uncast(dest)
      && ((alloc = tightly_coupled_allocation(dest, slow_region))
          != NULL)
      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
      && alloc->maybe_set_complete(&_gvn)) {
    // "You break it, you buy it."
    InitializeNode* init = alloc->initialization();
    assert(init->is_complete(), "we just did this");
    init->set_complete_with_arraycopy();
    assert(dest->is_CheckCastPP(), "sanity");
    assert(dest->in(0)->in(0) == init, "dest pinned");
    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
    // From this point on, every exit path is responsible for
    // initializing any non-copied parts of the object to zero.
    // Also, if this flag is set we make sure that arraycopy interacts properly
    // with G1, eliding pre-barriers. See CR 6627983.
    dest_uninitialized = true;
  } else {
    // No zeroing elimination here.
    alloc             = NULL;
    //original_dest   = dest;
    //dest_uninitialized = false;
  }

  // Results are placed here:
  enum { fast_path        = 1,  // normal void-returning assembly stub
         checked_path     = 2,  // special assembly stub with cleanup
         slow_call_path   = 3,  // something went wrong; call the VM
         zero_path        = 4,  // bypass when length of copy is zero
         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
         PATH_LIMIT       = 6
  };
  RegionNode* result_region = new(C) RegionNode(PATH_LIMIT);
  PhiNode*    result_i_o    = new(C) PhiNode(result_region, Type::ABIO);
  PhiNode*    result_memory = new(C) PhiNode(result_region, Type::MEMORY, adr_type);
  record_for_igvn(result_region);
  _gvn.set_type_bottom(result_i_o);
  _gvn.set_type_bottom(result_memory);
  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");

  // The slow_control path:
  Node* slow_control;
  Node* slow_i_o = i_o();
  Node* slow_mem = memory(adr_type);
  debug_only(slow_control = (Node*) badAddress);

  // Checked control path:
  Node* checked_control = top();
  Node* checked_mem     = NULL;
  Node* checked_i_o     = NULL;
  Node* checked_value   = NULL;

  if (basic_elem_type == T_CONFLICT) {
    assert(!dest_uninitialized, "");
    Node* cv = generate_generic_arraycopy(adr_type,
                                          src, src_offset, dest, dest_offset,
                                          copy_length, dest_uninitialized);
    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
    checked_control = control();
    checked_i_o     = i_o();
    checked_mem     = memory(adr_type);
    checked_value   = cv;
    set_control(top());         // no fast path
  }

  Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
  if (not_pos != NULL) {
    PreserveJVMState pjvms(this);
    set_control(not_pos);

    // (6) length must not be negative.
    if (!length_never_negative) {
      generate_negative_guard(copy_length, slow_region);
    }

    // copy_length is 0.
    if (!stopped() && dest_uninitialized) {
      Node* dest_length = alloc->in(AllocateNode::ALength);
      if (copy_length->eqv_uncast(dest_length)
          || _gvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
        // Clear the whole thing since there are no source elements to copy.
        generate_clear_array(adr_type, dest, basic_elem_type,
                             intcon(0), NULL,
                             alloc->in(AllocateNode::AllocSize));
        // Use a secondary InitializeNode as raw memory barrier.
        // Currently it is needed only on this path since other
        // paths have stub or runtime calls as raw memory barriers.
        InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
                                                       Compile::AliasIdxRaw,
                                                       top())->as_Initialize();
        init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
      }
    }

    // Present the results of the fast call.
    result_region->init_req(zero_path, control());
    result_i_o   ->init_req(zero_path, i_o());
    result_memory->init_req(zero_path, memory(adr_type));
  }

  if (!stopped() && dest_uninitialized) {
    // We have to initialize the *uncopied* part of the array to zero.
    // The copy destination is the slice dest[off..off+len].  The other slices
    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
    Node* dest_length = alloc->in(AllocateNode::ALength);
    Node* dest_tail   = _gvn.transform(new(C) AddINode(dest_offset,
                                                       copy_length));

    // If there is a head section that needs zeroing, do it now.
    if (find_int_con(dest_offset, -1) != 0) {
      generate_clear_array(adr_type, dest, basic_elem_type,
                           intcon(0), dest_offset,
                           NULL);
    }

    // Next, perform a dynamic check on the tail length.
    // It is often zero, and we can win big if we prove this.
    // There are two wins:  Avoid generating the ClearArray
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = NULL;
    if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt   = _gvn.transform(new(C) CmpINode(dest_tail, dest_length));
      Node* bol_lt   = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt));
      tail_ctl = generate_slow_guard(bol_lt, NULL);
      assert(tail_ctl != NULL || !stopped(), "must be an outcome");
    }

    // At this point, let's assume there is no tail.
    if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
      // There is no tail.  Try an upgrade to a 64-bit copy.
      bool didit = false;
      { PreserveJVMState pjvms(this);
        didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
                                         src, src_offset, dest, dest_offset,
                                         dest_size, dest_uninitialized);
        if (didit) {
          // Present the results of the block-copying fast call.
          result_region->init_req(bcopy_path, control());
          result_i_o   ->init_req(bcopy_path, i_o());
          result_memory->init_req(bcopy_path, memory(adr_type));
        }
      }
      if (didit)
        set_control(top());     // no regular fast path
    }

    // Clear the tail, if any.
    if (tail_ctl != NULL) {
      Node* notail_ctl = stopped() ? NULL : control();
      set_control(tail_ctl);
      if (notail_ctl == NULL) {
        generate_clear_array(adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
      } else {
        // Make a local merge.
        Node* done_ctl = new(C) RegionNode(3);
        Node* done_mem = new(C) PhiNode(done_ctl, Type::MEMORY, adr_type);
        done_ctl->init_req(1, notail_ctl);
        done_mem->init_req(1, memory(adr_type));
        generate_clear_array(adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
        done_ctl->init_req(2, control());
        done_mem->init_req(2, memory(adr_type));
        set_control( _gvn.transform(done_ctl));
        set_memory(  _gvn.transform(done_mem), adr_type );
      }
    }
  }

  BasicType copy_type = basic_elem_type;
  assert(basic_elem_type != T_ARRAY, "caller must fix this");
  if (!stopped() && copy_type == T_OBJECT) {
    // If src and dest have compatible element types, we can copy bits.
    // Types S[] and D[] are compatible if D is a supertype of S.
    //
    // If they are not, we will use checked_oop_disjoint_arraycopy,
    // which performs a fast optimistic per-oop check, and backs off
    // further to JVM_ArrayCopy on the first per-oop check that fails.
    // (Actually, we don't move raw bits only; the GC requires card marks.)

    // Get the Klass* for both src and dest
    Node* src_klass  = load_object_klass(src);
    Node* dest_klass = load_object_klass(dest);

    // Generate the subtype check.
    // This might fold up statically, or then again it might not.
    //
    // Non-static example:  Copying List<String>.elements to a new String[].
    // The backing store for a List<String> is always an Object[],
    // but its elements are always type String, if the generic types
    // are correct at the source level.
    //
    // Test S[] against D[], not S against D, because (probably)
    // the secondary supertype cache is less busy for S[] than S.
    // This usually only matters when D is an interface.
    Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
    // Plug failing path into checked_oop_disjoint_arraycopy
    if (not_subtype_ctrl != top()) {
      PreserveJVMState pjvms(this);
      set_control(not_subtype_ctrl);
      // (At this point we can assume disjoint_bases, since types differ.)
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
      Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
      Node* dest_elem_klass = _gvn.transform(n1);
      Node* cv = generate_checkcast_arraycopy(adr_type,
                                              dest_elem_klass,
                                              src, src_offset, dest, dest_offset,
                                              ConvI2X(copy_length), dest_uninitialized);
      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
      checked_control = control();
      checked_i_o     = i_o();
      checked_mem     = memory(adr_type);
      checked_value   = cv;
    }
    // At this point we know we do not need type checks on oop stores.

    // Let's see if we need card marks:
    if (alloc != NULL && use_ReduceInitialCardMarks()) {
      // If we do not need card marks, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
             "sizes agree");
    }
  }

  if (!stopped()) {
    // Generate the fast path, if possible.
    PreserveJVMState pjvms(this);
    generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
                                 src, src_offset, dest, dest_offset,
                                 ConvI2X(copy_length), dest_uninitialized);

    // Present the results of the fast call.
    result_region->init_req(fast_path, control());
    result_i_o   ->init_req(fast_path, i_o());
    result_memory->init_req(fast_path, memory(adr_type));
  }

  // Here are all the slow paths up to this point, in one bundle:
  slow_control = top();
  if (slow_region != NULL)
    slow_control = _gvn.transform(slow_region);
  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);

  set_control(checked_control);
  if (!stopped()) {
    // Clean up after the checked call.
    // The returned value is either 0 or -1^K,
    // where K = number of partially transferred array elements.
    Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0)));
    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);

    // If it is 0, we are done, so transfer to the end.
    Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff));
    result_region->init_req(checked_path, checks_done);
    result_i_o   ->init_req(checked_path, checked_i_o);
    result_memory->init_req(checked_path, checked_mem);

    // If it is not zero, merge into the slow call.
    set_control( _gvn.transform(new(C) IfFalseNode(iff) ));
    RegionNode* slow_reg2 = new(C) RegionNode(3);
    PhiNode*    slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO);
    PhiNode*    slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type);
    record_for_igvn(slow_reg2);
    slow_reg2  ->init_req(1, slow_control);
    slow_i_o2  ->init_req(1, slow_i_o);
    slow_mem2  ->init_req(1, slow_mem);
    slow_reg2  ->init_req(2, control());
    slow_i_o2  ->init_req(2, checked_i_o);
    slow_mem2  ->init_req(2, checked_mem);

    slow_control = _gvn.transform(slow_reg2);
    slow_i_o     = _gvn.transform(slow_i_o2);
    slow_mem     = _gvn.transform(slow_mem2);

    if (alloc != NULL) {
      // We'll restart from the very beginning, after zeroing the whole thing.
      // This can cause double writes, but that's OK since dest is brand new.
      // So we ignore the low 31 bits of the value returned from the stub.
    } else {
      // We must continue the copy exactly where it failed, or else
      // another thread might see the wrong number of writes to dest.
      Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1)));
      Node* slow_offset    = new(C) PhiNode(slow_reg2, TypeInt::INT);
      slow_offset->init_req(1, intcon(0));
      slow_offset->init_req(2, checked_offset);
      slow_offset  = _gvn.transform(slow_offset);

      // Adjust the arguments by the conditionally incoming offset.
      Node* src_off_plus  = _gvn.transform(new(C) AddINode(src_offset,  slow_offset));
      Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset));
      Node* length_minus  = _gvn.transform(new(C) SubINode(copy_length, slow_offset));

      // Tweak the node variables to adjust the code produced below:
      src_offset  = src_off_plus;
      dest_offset = dest_off_plus;
      copy_length = length_minus;
    }
  }

  set_control(slow_control);
  if (!stopped()) {
    // Generate the slow path, if needed.
    PreserveJVMState pjvms(this);   // replace_in_map may trash the map

    set_memory(slow_mem, adr_type);
    set_i_o(slow_i_o);

    if (dest_uninitialized) {
      generate_clear_array(adr_type, dest, basic_elem_type,
                           intcon(0), NULL,
                           alloc->in(AllocateNode::AllocSize));
    }

    generate_slow_arraycopy(adr_type,
                            src, src_offset, dest, dest_offset,
                            copy_length, /*dest_uninitialized*/false);

    result_region->init_req(slow_call_path, control());
    result_i_o   ->init_req(slow_call_path, i_o());
    result_memory->init_req(slow_call_path, memory(adr_type));
  }

  // Remove unused edges.
  for (uint i = 1; i < result_region->req(); i++) {
    if (result_region->in(i) == NULL)
      result_region->init_req(i, top());
  }

  // Finished; return the combined state.
  set_control( _gvn.transform(result_region));
  set_i_o(     _gvn.transform(result_i_o)    );
  set_memory(  _gvn.transform(result_memory), adr_type );

  // The memory edges above are precise in order to model effects around
  // array copies accurately to allow value numbering of field loads around
  // arraycopy.  Such field loads, both before and after, are common in Java
  // collections and similar classes involving header/array data structures.
  //
  // But with a low number of registers, or when some registers are used or
  // killed by arraycopy calls, this causes register spilling on the stack.
  // See 6544710. The next memory barrier is added to avoid it. If the
  // arraycopy can be optimized away (which it can, sometimes) then we can
  // manually remove the membar also.
  //
  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != NULL) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    // Record what AllocateNode this StoreStore protects so that
    // escape analysis can go from the MemBarStoreStoreNode to the
    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
    // based on the escape status of the AllocateNode.
    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
  } else if (InsertMemBarAfterArraycopy)
    insert_mem_bar(Op_MemBarCPUOrder);
}
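
// Worked example (illustrative) of the checked-path encoding handled above:
// the checkcast stub returns 0 on full success, or -1^K (that is, ~K) after
// K elements were transferred before a failing store-check.  A return value
// of -5 therefore means K = 4 elements were copied; XorI(checked_value, -1)
// recovers 4, which is used to advance src_offset/dest_offset and shrink
// copy_length before the slow call resumes the copy.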


// Helper function which determines if an arraycopy immediately follows
// an allocation, with no intervening tests or other escapes for the object.
AllocateArrayNode*
LibraryCallKit::tightly_coupled_allocation(Node* ptr,
                                           RegionNode* slow_region) {
  if (stopped())             return NULL;  // no fast path
  if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around

  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
  if (alloc == NULL)  return NULL;

  Node* rawmem = memory(Compile::AliasIdxRaw);
  // Is the allocation's memory state untouched?
  if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
    // Bail out if there have been raw-memory effects since the allocation.
    // (Example:  There might have been a call or safepoint.)
    return NULL;
  }
  rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
  if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
    return NULL;
  }

  // There must be no unexpected observers of this allocation.
  for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
    Node* obs = ptr->fast_out(i);
    if (obs != this->map()) {
      return NULL;
    }
  }

  // This arraycopy must unconditionally follow the allocation of the ptr.
  Node* alloc_ctl = ptr->in(0);
  assert(just_allocated_object(alloc_ctl) == ptr, "most recent alloc");

  Node* ctl = control();
  while (ctl != alloc_ctl) {
    // There may be guards which feed into the slow_region.
    // Any other control flow means that we might not get a chance
    // to finish initializing the allocated object.
    if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
      IfNode* iff = ctl->in(0)->as_If();
      Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
      assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
      if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
        ctl = iff->in(0);       // This test feeds the known slow_region.
        continue;
      }
      // One more try:  Various low-level checks bottom out in
      // uncommon traps.  If the debug-info of the trap omits
      // any reference to the allocation, as we've already
      // observed, then there can be no objection to the trap.
      bool found_trap = false;
      for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
        Node* obs = not_ctl->fast_out(j);
        if (obs->in(0) == not_ctl && obs->is_Call() &&
            (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
          found_trap = true; break;
        }
      }
      if (found_trap) {
        ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
        continue;
      }
    }
    return NULL;
  }

  // If we get this far, we have an allocation which immediately
  // precedes the arraycopy, and we can take over zeroing the new object.
  // The arraycopy will finish the initialization, and provide
  // a new control state to which we will anchor the destination pointer.

  return alloc;
}
5504
5505
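// A tightly coupled allocation looks like this at the Java level (an
// illustrative sketch; names are hypothetical):
//   int[] dest = new int[len];               // the AllocateArrayNode
//   System.arraycopy(src, 0, dest, 0, len);  // follows with no tests between
// Only guards feeding the slow_region or harmless uncommon traps may
// intervene between the allocation and the copy.
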
// Helper for initialization of arrays, creating a ClearArray.
// It writes zero bits in [start..end), within the body of an array object.
// The memory effects are all chained onto the 'adr_type' alias category.
//
// Since the object is otherwise uninitialized, we are free
// to put a little "slop" around the edges of the cleared area,
// as long as it does not go back into the array's header,
// or beyond the array end within the heap.
//
// The lower edge can be rounded down to the nearest jint and the
// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
//
// Arguments:
//   adr_type         memory slice where writes are generated
//   dest             oop of the destination array
//   basic_elem_type  element type of the destination
//   slice_idx        array index of first element to store
//   slice_len        number of elements to store (or NULL)
//   dest_size        total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-NULL.
// If dest_size is non-NULL, zeroing extends to the end of the object.
// If slice_len is non-NULL, the slice_idx value must be a constant.
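// Worked example (values illustrative): for T_INT elements, scale == 2 and,
// say, abase == 16, clearing slice [3, 7) gives
//   start_con = (16 + (3 << 2)) & ~clear_low                = 28
//   end_con   = round_to(16 + (7 << 2), BytesPerLong)       = 48
// so bytes [28, 48) are zeroed; the slop [44, 48) is harmless because the
// object is otherwise uninitialized.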
void
LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
                                     Node* dest,
                                     BasicType basic_elem_type,
                                     Node* slice_idx,
                                     Node* slice_len,
                                     Node* dest_size) {
  // one or the other but not both of slice_len and dest_size:
  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
  if (slice_len == NULL)  slice_len = top();
  if (dest_size == NULL)  dest_size = top();

  // operate on this memory slice:
  Node* mem = memory(adr_type); // memory slice to operate on

  // scaling and rounding of indexes:
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
  int clear_low = (-1 << scale) & (BytesPerInt - 1);
  int bump_bit  = (-1 << scale) & BytesPerInt;

  // determine constant starts and ends
  const intptr_t BIG_NEG = -128;
  assert(BIG_NEG + 2*abase < 0, "neg enough");
  intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
  intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
  if (slice_len_con == 0) {
    return;                     // nothing to do here
  }
  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
  intptr_t end_con   = find_intptr_t_con(dest_size, -1);
  if (slice_idx_con >= 0 && slice_len_con >= 0) {
    assert(end_con < 0, "not two cons");
    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
                       BytesPerLong);
  }

  if (start_con >= 0 && end_con >= 0) {
    // Constant start and end.  Simple.
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start_con, end_con, &_gvn);
  } else if (start_con >= 0 && dest_size != top()) {
    // Constant start, pre-rounded end after the tail of the array.
    Node* end = dest_size;
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start_con, end, &_gvn);
  } else if (start_con >= 0 && slice_len != top()) {
    // Constant start, non-constant end.  End needs rounding up.
    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
    intptr_t end_base = abase + (slice_idx_con << scale);
    int      end_round = (-1 << scale) & (BytesPerLong - 1);
    Node*    end = ConvI2X(slice_len);
    if (scale != 0)
      end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale) ));
    end_base += end_round;
    end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base)));
    end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round)));
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start_con, end, &_gvn);
  } else if (start_con < 0 && dest_size != top()) {
    // Non-constant start, pre-rounded end after the tail of the array.
    // This is almost certainly a "round-to-end" operation.
    Node* start = slice_idx;
    start = ConvI2X(start);
    if (scale != 0)
      start = _gvn.transform(new(C) LShiftXNode( start, intcon(scale) ));
    start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase)));
    if ((bump_bit | clear_low) != 0) {
      int to_clear = (bump_bit | clear_low);
      // Align up mod 8, then store a jint zero unconditionally
      // just before the mod-8 boundary.
      if (((abase + bump_bit) & ~to_clear) - bump_bit
          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
        bump_bit = 0;
        assert((abase & to_clear) == 0, "array base must be long-aligned");
      } else {
        // Bump 'start' up to (or past) the next jint boundary:
        start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit)));
        assert((abase & clear_low) == 0, "array base must be int-aligned");
      }
      // Round bumped 'start' down to jlong boundary in body of array.
      start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
        Node* p1 = basic_plus_adr(dest, x1);
        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
        mem = _gvn.transform(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start, end, &_gvn);
  } else {
    // Non-constant start, unrounded non-constant end.
    // (Nobody zeroes a random midsection of an array using this routine.)
    ShouldNotReachHere();       // fix caller
  }

  // Done.
  set_memory(mem, adr_type);
}


bool
LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
                                         BasicType basic_elem_type,
                                         AllocateNode* alloc,
                                         Node* src,  Node* src_offset,
                                         Node* dest, Node* dest_offset,
                                         Node* dest_size, bool dest_uninitialized) {
  // See if there is an advantage from block transfer.
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  if (scale >= LogBytesPerLong)
    return false;               // it is already a block transfer

  // Look at the alignment of the starting offsets.
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);

  intptr_t src_off_con  = (intptr_t) find_int_con(src_offset, -1);
  intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
  if (src_off_con < 0 || dest_off_con < 0)
    // At present, we can only understand constants.
    return false;

  intptr_t src_off  = abase + (src_off_con  << scale);
  intptr_t dest_off = abase + (dest_off_con << scale);

  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
    // Non-aligned; too bad.
    // One more chance:  Pick off an initial 32-bit word.
    // This is a common case, since abase can be odd mod 8.
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src,  src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
      store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
      src_off  += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
  }
  assert(src_off  % BytesPerLong == 0, "");
  assert(dest_off % BytesPerLong == 0, "");

  // Do this copy by giant steps.
  Node* sptr   = basic_plus_adr(src,  src_off);
  Node* dptr   = basic_plus_adr(dest, dest_off);
  Node* countx = dest_size;
  countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off)));
  countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong)));

  bool disjoint_bases = true;   // since alloc != NULL
  generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);

  return true;
}

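// Worked example of the 32-bit pick-off (values illustrative): with
// abase == 12 and a T_CHAR copy starting at element 0, src_off == 12 and
// 12 & (BytesPerLong-1) == 4 == BytesPerInt, so one jint is copied first
// and the remainder proceeds in aligned jlong-sized steps.
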
// Helper function; generates code for the slow case.
// We make a call to a runtime method which emulates the native method,
// but without the native wrapper overhead.
void
LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
                                        Node* src,  Node* src_offset,
                                        Node* dest, Node* dest_offset,
                                        Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");
  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
                                 OptoRuntime::slow_arraycopy_Type(),
                                 OptoRuntime::slow_arraycopy_Java(),
                                 "slow_arraycopy", adr_type,
                                 src, src_offset, dest, dest_offset,
                                 copy_length);

  // Handle exceptions thrown by this fellow:
  make_slow_call_ex(call, env()->Throwable_klass(), false);
}

// Helper function; generates code for cases requiring runtime checks.
Node*
LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
                                             Node* dest_elem_klass,
                                             Node* src,  Node* src_offset,
                                             Node* dest, Node* dest_offset,
                                             Node* copy_length, bool dest_uninitialized) {
  if (stopped())  return NULL;

  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  // Pick out the parameters required to perform a store-check
  // for the target array.  This is an optimistic check.  It will
  // look in each non-null element's class, at the desired klass's
  // super_check_offset, for the desired klass.
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
  Node* check_offset = ConvI2X(_gvn.transform(n3));
  Node* check_value  = dest_elem_klass;

  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);

  // (We know the arrays are never conjoint, because their types differ.)
  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::checkcast_arraycopy_Type(),
                                 copyfunc_addr, "checkcast_arraycopy", adr_type,
                                 // five arguments, of which two are
                                 // intptr_t (jlong in LP64)
                                 src_start, dest_start,
                                 copy_length XTOP,
                                 check_offset XTOP,
                                 check_value);

  return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
}


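// In Java terms the stub performs roughly this per element (an illustrative
// sketch; names are hypothetical):
//   Object e = src[i];
//   if (e != null && !destElemKlass.isInstance(e)) stop;  // report count copied
//   dest[j] = e;
// with the isInstance test implemented as the optimistic super_check_offset
// lookup described above.
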
// Helper function; generates code for cases requiring runtime checks.
Node*
LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
                                           Node* src,  Node* src_offset,
                                           Node* dest, Node* dest_offset,
                                           Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");
  if (stopped())  return NULL;
  address copyfunc_addr = StubRoutines::generic_arraycopy();
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::generic_arraycopy_Type(),
                                 copyfunc_addr, "generic_arraycopy", adr_type,
                                 src, src_offset, dest, dest_offset, copy_length);

  return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
}

// Helper function; generates the fast out-of-line call to an arraycopy stub.
void
LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
                                             BasicType basic_elem_type,
                                             bool disjoint_bases,
                                             Node* src,  Node* src_offset,
                                             Node* dest, Node* dest_offset,
                                             Node* copy_length, bool dest_uninitialized) {
  if (stopped())  return;               // nothing to do

  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    assert(src_offset != NULL && dest_offset != NULL, "");
    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
  }

  // Figure out which arraycopy runtime method to call.
  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr =
      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
                          disjoint_bases, copyfunc_name, dest_uninitialized);

  // Call it.  Note that the count_ix value is not scaled to a byte-size.
  make_runtime_call(RC_LEAF|RC_NO_FP,
                    OptoRuntime::fast_arraycopy_Type(),
                    copyfunc_addr, copyfunc_name, adr_type,
                    src_start, dest_start, copy_length XTOP);
}

//-------------inline_encodeISOArray-----------------------------------
// encode char[] to byte[] in ISO_8859_1
bool LibraryCallKit::inline_encodeISOArray() {
  assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
  // no receiver since it is static method
  Node *src        = argument(0);
  Node *src_offset = argument(1);
  Node *dst        = argument(2);
  Node *dst_offset = argument(3);
  Node *length     = argument(4);

  const Type* src_type = src->Value(&_gvn);
  const Type* dst_type = dst->Value(&_gvn);
  const TypeAryPtr* top_src  = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dst_type->isa_aryptr();
  if (top_src == NULL || top_src->klass() == NULL ||
      top_dest == NULL || top_dest->klass() == NULL) {
    // failed array check
    return false;
  }

  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_CHAR || dst_elem != T_BYTE) {
    return false;
  }
  Node* src_start = array_element_address(src, src_offset, src_elem);
  Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
  // 'src_start' points to src array + scaled offset
  // 'dst_start' points to dst array + scaled offset

  const TypeAryPtr* mtype = TypeAryPtr::BYTES;
  Node* enc = new (C) EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
  enc = _gvn.transform(enc);
  Node* res_mem = _gvn.transform(new (C) SCMemProjNode(enc));
  set_memory(res_mem, mtype);
  set_result(enc);
  return true;
}

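// Java-level loop being intrinsified (an illustrative sketch; variable
// names are hypothetical):
//   int i = 0;
//   for (; i < len; i++) {
//     char c = sa[sp + i];
//     if (c > '\u00FF') break;   // not ISO-8859-1; stop and report i
//     da[dp + i] = (byte) c;
//   }
//   return i;
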
//-------------inline_multiplyToLen-----------------------------------
bool LibraryCallKit::inline_multiplyToLen() {
  assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::multiplyToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "multiplyToLen";

  assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");

  // no receiver because it is a static method
  Node* x    = argument(0);
  Node* xlen = argument(1);
  Node* y    = argument(2);
  Node* ylen = argument(3);
  Node* z    = argument(4);

  const Type* x_type = x->Value(&_gvn);
  const Type* y_type = y->Value(&_gvn);
  const TypeAryPtr* top_x = x_type->isa_aryptr();
  const TypeAryPtr* top_y = y_type->isa_aryptr();
  if (top_x == NULL || top_x->klass() == NULL ||
      top_y == NULL || top_y->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (x_elem != T_INT || y_elem != T_INT) {
    return false;
  }

  // Set the original stack and the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
  // on the return from z array allocation in runtime.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    Node* x_start = array_element_address(x, intcon(0), x_elem);
    Node* y_start = array_element_address(y, intcon(0), y_elem);
    // 'x_start' points to x array + scaled xlen
    // 'y_start' points to y array + scaled ylen

    // Allocate the result array
    Node* zlen = _gvn.transform(new(C) AddINode(xlen, ylen));
    ciKlass* klass = ciTypeArrayKlass::make(T_INT);
    Node* klass_node = makecon(TypeKlassPtr::make(klass));

    IdealKit ideal(this);

#define __ ideal.
    Node* one  = __ ConI(1);
    Node* zero = __ ConI(0);
    IdealVariable need_alloc(ideal), z_alloc(ideal);  __ declarations_done();
    __ set(need_alloc, zero);
    __ set(z_alloc, z);
    __ if_then(z, BoolTest::eq, null()); {
      __ increment (need_alloc, one);
    } __ else_(); {
      // Update graphKit memory and control from IdealKit.
      sync_kit(ideal);
      Node* zlen_arg = load_array_length(z);
      // Update IdealKit memory and control from graphKit.
      __ sync_kit(this);
      __ if_then(zlen_arg, BoolTest::lt, zlen); {
        __ increment (need_alloc, one);
      } __ end_if();
    } __ end_if();

    __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
      // Update graphKit memory and control from IdealKit.
      sync_kit(ideal);
      Node * narr = new_array(klass_node, zlen, 1);
      // Update IdealKit memory and control from graphKit.
      __ sync_kit(this);
      __ set(z_alloc, narr);
    } __ end_if();

    sync_kit(ideal);
    z = __ value(z_alloc);
    // Can't use TypeAryPtr::INTS which uses Bottom offset.
    _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
    // Final sync IdealKit and GraphKit.
    final_sync(ideal);
#undef __

    Node* z_start = array_element_address(z, intcon(0), T_INT);

    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                   OptoRuntime::multiplyToLen_Type(),
                                   stubAddr, stubName, TypePtr::BOTTOM,
                                   x_start, xlen, y_start, ylen, z_start, zlen);
  } // original reexecute is set back here

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(z);
  return true;
}

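// Contract of the intrinsified Java method (an illustrative sketch):
//   private static int[] multiplyToLen(int[] x, int xlen, int[] y, int ylen, int[] z)
// The caller-supplied z is reused when it is non-null and holds at least
// xlen + ylen ints; otherwise the IdealKit logic above allocates a fresh one.
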
//-------------inline_squareToLen------------------------------------
bool LibraryCallKit::inline_squareToLen() {
  assert(UseSquareToLenIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::squareToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "squareToLen";

  assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");

  Node* x    = argument(0);
  Node* len  = argument(1);
  Node* z    = argument(2);
  Node* zlen = argument(3);

  const Type* x_type = x->Value(&_gvn);
  const Type* z_type = z->Value(&_gvn);
  const TypeAryPtr* top_x = x_type->isa_aryptr();
  const TypeAryPtr* top_z = z_type->isa_aryptr();
  if (top_x == NULL || top_x->klass() == NULL ||
      top_z == NULL || top_z->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (x_elem != T_INT || z_elem != T_INT) {
    return false;
  }


  Node* x_start = array_element_address(x, intcon(0), x_elem);
  Node* z_start = array_element_address(z, intcon(0), z_elem);

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::squareToLen_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 x_start, len, z_start, zlen);

  set_result(z);
  return true;
}

//-------------inline_mulAdd------------------------------------------
bool LibraryCallKit::inline_mulAdd() {
  assert(UseMulAddIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::mulAdd();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "mulAdd";

  assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");

  Node* out    = argument(0);
  Node* in     = argument(1);
  Node* offset = argument(2);
  Node* len    = argument(3);
  Node* k      = argument(4);

  const Type* out_type = out->Value(&_gvn);
  const Type* in_type  = in->Value(&_gvn);
  const TypeAryPtr* top_out = out_type->isa_aryptr();
  const TypeAryPtr* top_in  = in_type->isa_aryptr();
  if (top_out == NULL || top_out->klass() == NULL ||
      top_in == NULL || top_in->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType in_elem  = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (out_elem != T_INT || in_elem != T_INT) {
    return false;
  }

  Node* outlen     = load_array_length(out);
  Node* new_offset = _gvn.transform(new (C) SubINode(outlen, offset));
  Node* out_start  = array_element_address(out, intcon(0), out_elem);
  Node* in_start   = array_element_address(in,  intcon(0), in_elem);

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::mulAdd_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 out_start, in_start, new_offset, len, k);
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

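// Contract of the intrinsified Java method (an illustrative sketch):
//   static int mulAdd(int[] out, int[] in, int offset, int len, int k)
// adds in[0..len) * k into out and returns the final carry; new_offset
// above converts the Java-side, end-relative offset into the start-relative
// index the stub expects.
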
//-------------inline_montgomeryMultiply-----------------------------------
bool LibraryCallKit::inline_montgomeryMultiply() {
  address stubAddr = StubRoutines::montgomeryMultiply();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }

  assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
  const char* stubName = "montgomery_multiply";

  assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");

  Node* a   = argument(0);
  Node* b   = argument(1);
  Node* n   = argument(2);
  Node* len = argument(3);
  Node* inv = argument(4);
  Node* m   = argument(6);

  const Type* a_type = a->Value(&_gvn);
  const TypeAryPtr* top_a = a_type->isa_aryptr();
  const Type* b_type = b->Value(&_gvn);
  const TypeAryPtr* top_b = b_type->isa_aryptr();
  const Type* n_type = n->Value(&_gvn);
  const TypeAryPtr* top_n = n_type->isa_aryptr();
  const Type* m_type = m->Value(&_gvn);
  const TypeAryPtr* top_m = m_type->isa_aryptr();
  if (top_a == NULL || top_a->klass() == NULL ||
      top_b == NULL || top_b->klass() == NULL ||
      top_n == NULL || top_n->klass() == NULL ||
      top_m == NULL || top_m->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
    return false;
  }

  // Make the call
  {
    Node* a_start = array_element_address(a, intcon(0), a_elem);
    Node* b_start = array_element_address(b, intcon(0), b_elem);
    Node* n_start = array_element_address(n, intcon(0), n_elem);
    Node* m_start = array_element_address(m, intcon(0), m_elem);

    Node* call = NULL;
    if (CCallingConventionRequiresIntsAsLongs) {
      Node* len_I2L = ConvI2L(len);
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomeryMultiply_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, b_start, n_start, len_I2L XTOP, inv,
                               top(), m_start);
    } else {
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomeryMultiply_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, b_start, n_start, len, inv, top(),
                               m_start);
    }
    set_result(m);
  }

  return true;
}

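// Background (an illustrative note): the stub computes m = a * b * R^-1 mod n
// in Montgomery form, where R = 2^(32*len) and inv is the precomputed
// negative inverse of n modulo the word base, so the reduction needs no
// division by n; montgomerySquare below is the b == a special case.
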
bool LibraryCallKit::inline_montgomerySquare() {
  address stubAddr = StubRoutines::montgomerySquare();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }

  assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
  const char* stubName = "montgomery_square";

  assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");

  Node* a   = argument(0);
  Node* n   = argument(1);
  Node* len = argument(2);
  Node* inv = argument(3);
  Node* m   = argument(5);

  const Type* a_type = a->Value(&_gvn);
  const TypeAryPtr* top_a = a_type->isa_aryptr();
  const Type* n_type = n->Value(&_gvn);
  const TypeAryPtr* top_n = n_type->isa_aryptr();
  const Type* m_type = m->Value(&_gvn);
  const TypeAryPtr* top_m = m_type->isa_aryptr();
  if (top_a == NULL || top_a->klass() == NULL ||
      top_n == NULL || top_n->klass() == NULL ||
      top_m == NULL || top_m->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
    return false;
  }

  // Make the call
  {
    Node* a_start = array_element_address(a, intcon(0), a_elem);
    Node* n_start = array_element_address(n, intcon(0), n_elem);
    Node* m_start = array_element_address(m, intcon(0), m_elem);

    Node* call = NULL;
    if (CCallingConventionRequiresIntsAsLongs) {
      Node* len_I2L = ConvI2L(len);
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomerySquare_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, n_start, len_I2L XTOP, inv, top(),
                               m_start);
    } else {
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomerySquare_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, n_start, len, inv, top(),
                               m_start);
    }

    set_result(m);
  }

  return true;
}


/**
 * Calculate CRC32 for byte.
 * int java.util.zip.CRC32.update(int crc, int b)
 */
bool LibraryCallKit::inline_updateCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  assert(callee()->signature()->size() == 2, "update has 2 parameters");
  // no receiver since it is static method
  Node* crc = argument(0); // type: int
  Node* b   = argument(1); // type: int

  /*
   *    int c = ~ crc;
   *    b = timesXtoThe32[(b ^ c) & 0xFF];
   *    b = b ^ (c >>> 8);
   *    crc = ~b;
   */

  Node* M1 = intcon(-1);
  crc = _gvn.transform(new (C) XorINode(crc, M1));
  Node* result = _gvn.transform(new (C) XorINode(crc, b));
  result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));

  Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
  Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
  Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
  result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);

  crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
  result = _gvn.transform(new (C) XorINode(crc, result));
  result = _gvn.transform(new (C) XorINode(result, M1));
  set_result(result);
  return true;
}

/**
 * Calculate CRC32 for byte[] array.
 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
 */
bool LibraryCallKit::inline_updateBytesCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
  // no receiver since it is static method
  Node* crc    = argument(0); // type: int
  Node* src    = argument(1); // type: oop
  Node* offset = argument(2); // type: int
  Node* length = argument(3); // type: int

  const Type* src_type = src->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == NULL || top_src->klass() == NULL) {
    // failed array check
    return false;
  }

  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_BYTE) {
    return false;
  }

  // 'src_start' points to src array + scaled offset
  Node* src_start = array_element_address(src, offset, src_elem);

  // We assume that range check is done by caller.
  // TODO: generate range check (offset+length < src.length) in debug VM.

  // Call the stub.
  address stubAddr = StubRoutines::updateBytesCRC32();
  const char *stubName = "updateBytesCRC32";
  Node* call;
  if (CCallingConventionRequiresIntsAsLongs) {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc XTOP, src_start, length XTOP);
  } else {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc, src_start, length);
  }
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

/**
 * Calculate CRC32 for ByteBuffer.
 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
bool LibraryCallKit::inline_updateByteBufferCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
  // no receiver since it is static method
  Node* crc    = argument(0); // type: int
  Node* src    = argument(1); // type: long
  Node* offset = argument(3); // type: int
  Node* length = argument(4); // type: int

  src = ConvL2X(src);  // adjust Java long to machine word
  Node* base = _gvn.transform(new (C) CastX2PNode(src));
  offset = ConvI2X(offset);

  // 'src_start' points to the native buffer + offset
  Node* src_start = basic_plus_adr(top(), base, offset);

  // Call the stub.
  address stubAddr = StubRoutines::updateBytesCRC32();
  const char *stubName = "updateBytesCRC32";
  Node* call;
  if (CCallingConventionRequiresIntsAsLongs) {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc XTOP, src_start, length XTOP);
  } else {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc, src_start, length);
  }
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

//----------------------------inline_reference_get----------------------------
// public T java.lang.ref.Reference.get();
bool LibraryCallKit::inline_reference_get() {
  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "should have already been set");

  // Get the argument:
  Node* reference_obj = null_check_receiver();
  if (stopped()) return true;

  Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);

  ciInstanceKlass* klass = env()->Object_klass();
  const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);

  Node* no_ctrl = NULL;
  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);

  // Use the pre-barrier to record the value in the referent field
  pre_barrier(false /* do_load */,
              control(),
              NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
              result /* pre_val */,
              T_OBJECT);

  // Add memory barrier to prevent commoning reads from this field
  // across safepoint since GC can change its value.
  insert_mem_bar(Op_MemBarCPUOrder);

  set_result(result);
  return true;
}


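// Why the pre-barrier above (an illustrative note): SATB collectors such as
// G1 must observe every reference loaded from Reference.referent, or the
// referent could be reclaimed while the Java code still holds it; recording
// 'result' as pre_val keeps it alive, just as a field overwrite would.
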
Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
                                              bool is_exact=true, bool is_static=false) {

  const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
  assert(tinst != NULL, "obj is null");
  assert(tinst->klass()->is_loaded(), "obj is not loaded");
  assert(!is_exact || tinst->klass_is_exact(), "klass not exact");

  ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
                                                                          ciSymbol::make(fieldTypeString),
                                                                          is_static);
  if (field == NULL) return (Node *) NULL;
  assert (field != NULL, "undefined field");

  // Next code copied from Parse::do_get_xxx():

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  bool is_vol = field->is_volatile();
  ciType* field_klass = field->type();
  assert(field_klass->is_loaded(), "should be loaded");
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(fromObj, fromObj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;
  if (bt == T_OBJECT) {
    type = TypeOopPtr::make_from_klass(field_klass->as_klass());
  } else {
    type = Type::get_const_basic_type(bt);
  }

  Node* leading_membar = NULL;
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
    leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (is_vol) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
    mb->as_MemBar()->set_trailing_load();
  }
  return loadedField;
}


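// Usage sketch (field names as in the crypto callers below):
//   Node* k = load_field_from_object(obj, "K", "[I", /*is_exact*/ false);
// A NULL result means the expected field is absent (e.g. an older class
// file), and every caller bails out to the Java path in that case.
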
//------------------------------inline_aescrypt_Block-----------------------
bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
  address stubAddr = NULL;
  const char *stubName;
  assert(UseAES, "need AES instruction support");

  switch(id) {
  case vmIntrinsics::_aescrypt_encryptBlock:
    stubAddr = StubRoutines::aescrypt_encryptBlock();
    stubName = "aescrypt_encryptBlock";
    break;
  case vmIntrinsics::_aescrypt_decryptBlock:
    stubAddr = StubRoutines::aescrypt_decryptBlock();
    stubName = "aescrypt_decryptBlock";
    break;
  }
  if (stubAddr == NULL) return false;

  Node* aescrypt_object = argument(0);
  Node* src             = argument(1);
  Node* src_offset      = argument(2);
  Node* dest            = argument(3);
  Node* dest_offset     = argument(4);

  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_gvn);
  const Type* dest_type = dest->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert (top_src != NULL && top_src->klass() != NULL && top_dest != NULL && top_dest->klass() != NULL, "args are strange");

  // for the quick and dirty code we will skip all the checks.
  // we are just trying to get the call to be generated.
  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    assert(src_offset != NULL && dest_offset != NULL, "");
    src_start  = array_element_address(src,  src_offset,  T_BYTE);
    dest_start = array_element_address(dest, dest_offset, T_BYTE);
  }

  // now need to get the start of its expanded key array
  // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
  if (k_start == NULL) return false;

  if (Matcher::pass_original_key_for_aes()) {
    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
    // compatibility issues between Java key expansion and SPARC crypto instructions
    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
    if (original_k_start == NULL) return false;

    // Call the stub.
    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
                      stubAddr, stubName, TypePtr::BOTTOM,
                      src_start, dest_start, k_start, original_k_start);
  } else {
    // Call the stub.
    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
                      stubAddr, stubName, TypePtr::BOTTOM,
                      src_start, dest_start, k_start);
  }

  return true;
}

//------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
  address stubAddr = NULL;
  const char *stubName = NULL;

  assert(UseAES, "need AES instruction support");

  switch(id) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
    stubName = "cipherBlockChaining_encryptAESCrypt";
    break;
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
    stubName = "cipherBlockChaining_decryptAESCrypt";
    break;
  }
  if (stubAddr == NULL) return false;

  Node* cipherBlockChaining_object = argument(0);
  Node* src                        = argument(1);
  Node* src_offset                 = argument(2);
  Node* len                        = argument(3);
  Node* dest                       = argument(4);
  Node* dest_offset                = argument(5);

  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_gvn);
  const Type* dest_type = dest->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert (top_src != NULL && top_src->klass() != NULL
          && top_dest != NULL && top_dest->klass() != NULL, "args are strange");

  // checks are the responsibility of the caller
  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    assert(src_offset != NULL && dest_offset != NULL, "");
    src_start  = array_element_address(src,  src_offset,  T_BYTE);
    dest_start = array_element_address(dest, dest_offset, T_BYTE);
  }

  // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
  // (because of the predicated logic executed earlier).
  // so we cast it here safely.
  // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java

  Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
  if (embeddedCipherObj == NULL) return false;

  // cast it to what we know it will be at runtime
  const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
  assert(tinst != NULL, "CBC obj is null");
  assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");

  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
  const TypeOopPtr* xtype = aklass->as_instance_type();
  Node* aescrypt_object = new(C) CheckCastPPNode(control(), embeddedCipherObj, xtype);
  aescrypt_object = _gvn.transform(aescrypt_object);

  // we need to get the start of the aescrypt_object's expanded key array
  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
  if (k_start == NULL) return false;

  // similarly, get the start address of the r vector
  Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
  if (objRvec == NULL) return false;
  Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);

  Node* cbcCrypt;
  if (Matcher::pass_original_key_for_aes()) {
    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
    // compatibility issues between Java key expansion and SPARC crypto instructions
    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
    if (original_k_start == NULL) return false;

    // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, dest_start, k_start, r_start, len, original_k_start);
  } else {
    // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, dest_start, k_start, r_start, len);
  }

  // return cipher length (int)
  Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
  set_result(retvalue);
  return true;
}

//------------------------------get_key_start_from_aescrypt_object-----------------------
Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
#ifdef PPC64
  // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
  // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
  // However, ppc64 vncipher processes MixColumns and requires the same round keys as encryption.
  // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
  Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false);
  assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objSessionK == NULL) {
    return (Node *) NULL;
  }
  Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS);
#else
  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
#endif // PPC64
  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objAESCryptKey == NULL) return (Node *) NULL;

  // now have the array, need to get the start address of the K array
  Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
  return k_start;
}

//------------------------------get_original_key_start_from_aescrypt_object-----------------------
Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objAESCryptKey == NULL) return (Node *) NULL;

  // now have the array, need to get the start address of the lastKey array
  Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
  return original_k_start;
}

//----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
// for encryption:
//    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
// for decryption:
//    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
//    note cipher==plain is more conservative than the original java code but that's OK
//
Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
  // The receiver was checked for NULL already.
  Node* objCBC = argument(0);

  // Load embeddedCipher field of CipherBlockChaining object.
  Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);

  // get AESCrypt klass for instanceOf check
  // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
  // will have same classloader as CipherBlockChaining object
  const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
  assert(tinst != NULL, "CBCobj is null");
  assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");

  // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  if (!klass_AESCrypt->is_loaded()) {
    // if AESCrypt is not even loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no regular fast path
    return ctrl;
  }
  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();

  Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
  Node* cmp_instof  = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
  Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));

  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);

  // for encryption, we are done
  if (!decrypting)
    return instof_false;  // even if it is NULL

  // for decryption, we need to add a further check to avoid
  // taking the intrinsic path when cipher and plain are the same
  // see the original java code for why.
  RegionNode* region = new(C) RegionNode(3);
  region->init_req(1, instof_false);
  Node* src = argument(1);
  Node* dest = argument(4);
  Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
  Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
  Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
  region->init_req(2, src_dest_conjoint);

  record_for_igvn(region);
  return _gvn.transform(region);
}

//------------------------------inline_ghash_processBlocks
bool LibraryCallKit::inline_ghash_processBlocks() {
  address stubAddr;
  const char *stubName;
  assert(UseGHASHIntrinsics, "need GHASH intrinsics support");

  stubAddr = StubRoutines::ghash_processBlocks();
  stubName = "ghash_processBlocks";

  Node* data    = argument(0);
  Node* offset  = argument(1);
  Node* len     = argument(2);
  Node* state   = argument(3);
  Node* subkeyH = argument(4);

  Node* state_start = array_element_address(state, intcon(0), T_LONG);
  assert(state_start, "state is NULL");
  Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
  assert(subkeyH_start, "subkeyH is NULL");
  Node* data_start = array_element_address(data, offset, T_BYTE);
  assert(data_start, "data is NULL");

  Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
                                  OptoRuntime::ghash_processBlocks_Type(),
                                  stubAddr, stubName, TypePtr::BOTTOM,
                                  state_start, subkeyH_start, data_start, len);
  return true;
}

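// Background (an illustrative note): GHASH folds each 16-byte block X into
// the accumulator as state = (state ^ X) * H in GF(2^128), where H is the
// hash subkey; the stub applies that update for each of the 'len' blocks,
// modifying 'state' in place.
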
//------------------------------inline_sha_implCompress-----------------------
//
// Calculate SHA (i.e., SHA-1) for single-block byte[] array.
// void sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
//
// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
// void sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
//
// Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
// void sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
//
bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
  assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");

  Node* sha_obj = argument(0);
  Node* src     = argument(1); // type oop
  Node* ofs     = argument(2); // type int

  const Type* src_type = src->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == NULL || top_src->klass() == NULL) {
    // failed array check
    return false;
  }
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_BYTE) {
    return false;
  }
  // 'src_start' points to src array + offset
  Node* src_start = array_element_address(src, ofs, src_elem);
  Node* state = NULL;
  address stubAddr;
  const char *stubName;

  switch(id) {
  case vmIntrinsics::_sha_implCompress:
    assert(UseSHA1Intrinsics, "need SHA1 instruction support");
    state = get_state_from_sha_object(sha_obj);
    stubAddr = StubRoutines::sha1_implCompress();
    stubName = "sha1_implCompress";
    break;
  case vmIntrinsics::_sha2_implCompress:
    assert(UseSHA256Intrinsics, "need SHA256 instruction support");
    state = get_state_from_sha_object(sha_obj);
    stubAddr = StubRoutines::sha256_implCompress();
    stubName = "sha256_implCompress";
    break;
  case vmIntrinsics::_sha5_implCompress:
    assert(UseSHA512Intrinsics, "need SHA512 instruction support");
    state = get_state_from_sha5_object(sha_obj);
    stubAddr = StubRoutines::sha512_implCompress();
    stubName = "sha512_implCompress";
    break;
  default:
    fatal_unexpected_iid(id);
    return false;
  }
  if (state == NULL) return false;

  // Call the stub.
  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, state);

  return true;
}

//------------------------------inline_digestBase_implCompressMB-----------------------
//
// Calculate SHA/SHA2/SHA5 for multi-block byte[] array.
// int sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
//
bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
         "need SHA1/SHA256/SHA512 instruction support");
  assert((uint)predicate < 3, "sanity");
  assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");

  Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
  Node* src            = argument(1); // byte[] array
  Node* ofs            = argument(2); // type int
  Node* limit          = argument(3); // type int

  const Type* src_type = src->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src == NULL || top_src->klass() == NULL) {
    // failed array check
    return false;
  }
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_BYTE) {
    return false;
  }
  // 'src_start' points to src array + offset
  Node* src_start = array_element_address(src, ofs, src_elem);

  const char* klass_SHA_name = NULL;
  const char* stub_name = NULL;
  address     stub_addr = NULL;
  bool        long_state = false;

  switch (predicate) {
  case 0:
    if (UseSHA1Intrinsics) {
      klass_SHA_name = "sun/security/provider/SHA";
      stub_name = "sha1_implCompressMB";
      stub_addr = StubRoutines::sha1_implCompressMB();
    }
    break;
  case 1:
    if (UseSHA256Intrinsics) {
      klass_SHA_name = "sun/security/provider/SHA2";
      stub_name = "sha256_implCompressMB";
      stub_addr = StubRoutines::sha256_implCompressMB();
    }
    break;
  case 2:
    if (UseSHA512Intrinsics) {
      klass_SHA_name = "sun/security/provider/SHA5";
      stub_name = "sha512_implCompressMB";
      stub_addr = StubRoutines::sha512_implCompressMB();
      long_state = true;
    }
    break;
  default:
    fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate));
  }
  if (klass_SHA_name != NULL) {
    // get DigestBase klass to look up the SHA klass
    const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
    assert(tinst != NULL, "digestBase_obj is not an instance");
    assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");

    ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
    assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
    ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
    return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
  }
  return false;
}
//------------------------------inline_sha_implCompressMB-----------------------
bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
                                               bool long_state, address stubAddr, const char *stubName,
                                               Node* src_start, Node* ofs, Node* limit) {
  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
  const TypeOopPtr* xtype = aklass->as_instance_type();
  Node* sha_obj = new (C) CheckCastPPNode(control(), digestBase_obj, xtype);
  sha_obj = _gvn.transform(sha_obj);
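  // The predicate has already proven that digestBase_obj is an instance of
  // the expected SHA subclass, so the CheckCastPP above only sharpens the
  // type seen by the compiler; it emits no runtime check. The sharper type
  // lets the "state" field load below resolve against the right class.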

  Node* state;
  if (long_state) {
    state = get_state_from_sha5_object(sha_obj);
  } else {
    state = get_state_from_sha_object(sha_obj);
  }
  if (state == NULL) return false;

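  // On ABIs where CCallingConventionRequiresIntsAsLongs is set (e.g. PPC64),
  // C calls expect int arguments widened to 64 bits; XTOP appends the high
  // word (top()) so that ofs and limit each occupy a two-slot long.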
  // Call the stub.
  Node* call;
  if (CCallingConventionRequiresIntsAsLongs) {
    call = make_runtime_call(RC_LEAF|RC_NO_FP,
                             OptoRuntime::digestBase_implCompressMB_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             src_start, state, ofs XTOP, limit XTOP);
  } else {
    call = make_runtime_call(RC_LEAF|RC_NO_FP,
                             OptoRuntime::digestBase_implCompressMB_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             src_start, state, ofs, limit);
  }
  // return ofs (int)
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);

  return true;
}

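// Both SHA (SHA-1) and SHA2 (SHA-256) keep their chaining state in an int[]
// field named "state", so a single helper serves them both; SHA5 (SHA-384/512)
// uses a long[] and gets its own helper below.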
//------------------------------get_state_from_sha_object-----------------------
Node* LibraryCallKit::get_state_from_sha_object(Node* sha_object) {
  Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
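  // Debug builds assert on a mismatched class library; the NULL check below
  // keeps product builds safe by bailing out of the intrinsic instead.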
  assert(sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
  if (sha_state == NULL) return (Node*) NULL;

  // We have the array; now get the start address of the state array.
  Node* state = array_element_address(sha_state, intcon(0), T_INT);
  return state;
}

//------------------------------get_state_from_sha5_object-----------------------
Node* LibraryCallKit::get_state_from_sha5_object(Node* sha_object) {
  Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
  assert(sha_state != NULL, "wrong version of sun.security.provider.SHA5");
  if (sha_state == NULL) return (Node*) NULL;

  // We have the array; now get the start address of the state array.
  Node* state = array_element_address(sha_state, intcon(0), T_LONG);
  return state;
}

//----------------------------inline_digestBase_implCompressMB_predicate----------------------------
// Return node representing slow path of predicate check.
// The pseudo-code we want to emulate with this predicate is:
// if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
//
Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
         "need SHA1/SHA256/SHA512 instruction support");
  assert((uint)predicate < 3, "sanity");

  // The receiver was checked for NULL already.
  Node* digestBaseObj = argument(0);

  // get DigestBase klass for instanceOf check
  const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
  assert(tinst != NULL, "digestBaseObj is not an instance pointer");
  assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");

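  // A predicate index maps to a subclass name only when the matching
  // UseSHA*Intrinsics flag is on; otherwise klass_SHA_name stays NULL and
  // the intrinsic path is disabled below.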
  const char* klass_SHA_name = NULL;
  switch (predicate) {
  case 0:
    if (UseSHA1Intrinsics) {
      // we want to do an instanceof comparison against the SHA class
      klass_SHA_name = "sun/security/provider/SHA";
    }
    break;
  case 1:
    if (UseSHA256Intrinsics) {
      // we want to do an instanceof comparison against the SHA2 class
      klass_SHA_name = "sun/security/provider/SHA2";
    }
    break;
  case 2:
    if (UseSHA512Intrinsics) {
      // we want to do an instanceof comparison against the SHA5 class
      klass_SHA_name = "sun/security/provider/SHA5";
    }
    break;
  default:
    fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate));
  }

  ciKlass* klass_SHA = NULL;
  if (klass_SHA_name != NULL) {
    klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
  }
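  // Setting control to top() kills the intrinsic path entirely: the caller
  // sees a dead fast path and generates only the Java fallback. Returning
  // the old control makes that fallback unconditional.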
  if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
    // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no intrinsic path
    return ctrl;
  }
  ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();

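  // Emit digestBaseObj instanceof instklass_SHA. Comparing the instanceof
  // result to 1 with BoolTest::ne makes the guard's taken branch the
  // "not an instance" case (marked unlikely via PROB_MIN), and that branch
  // is handed back to the caller as the slow path.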
  Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
  Node* cmp_instof = _gvn.transform(new (C) CmpINode(instofSHA, intcon(1)));
  Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);

  return instof_false; // even if it is NULL
}

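// MethodHandleImpl.profileBoolean(boolean, int[]) records a boolean result in
// a two-element counts array. This intrinsic replaces that profiling code once
// the counts are compile-time constants.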
bool LibraryCallKit::inline_profileBoolean() {
  Node* counts = argument(1);
  const TypeAryPtr* ary = NULL;
  ciArray* aobj = NULL;
  if (counts->is_Con()
      && (ary = counts->bottom_type()->isa_aryptr()) != NULL
      && (aobj = ary->const_oop()->as_array()) != NULL
      && (aobj->length() == 2)) {
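    // Reaching here means counts is a compile-time-constant int[2], i.e. the
    // enclosing LambdaForm/MethodHandle was constant-folded, so the profile
    // values can be read directly by the compiler.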
    // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
    jint false_cnt = aobj->element_value(0).as_int();
    jint true_cnt  = aobj->element_value(1).as_int();

    if (C->log() != NULL) {
      C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
                     false_cnt, true_cnt);
    }

    if (false_cnt + true_cnt == 0) {
      // According to profile, never executed.
      uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                          Deoptimization::Action_reinterpret);
      return true;
    }

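    // If only one of the two outcomes was ever observed, speculate that the
    // result always equals the seen value: guard the unseen value with an
    // uncommon trap and replace the result with a constant.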
    // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
    // records the number of occurrences of each value.
    Node* result = argument(0);
    if (false_cnt == 0 || true_cnt == 0) {
      // According to profile, one value has never been seen.
      int expected_val = (false_cnt == 0) ? 1 : 0;

      Node* cmp  = _gvn.transform(new (C) CmpINode(result, intcon(expected_val)));
      Node* test = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));

      IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
      Node* fast_path = _gvn.transform(new (C) IfTrueNode(check));
      Node* slow_path = _gvn.transform(new (C) IfFalseNode(check));

      { // Slow path: uncommon trap for never seen value and then reexecute
        // MethodHandleImpl::profileBoolean() to bump the count, so JIT knows
        // the value has been seen at least once.
        PreserveJVMState pjvms(this);
        PreserveReexecuteState preexecs(this);
        jvms()->set_should_reexecute(true);

        set_control(slow_path);
        set_i_o(i_o());

        uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                            Deoptimization::Action_reinterpret);
      }
      // The guard for the never-seen value enables sharpening of the result and
      // returning a constant. This allows branches on the same value to be
      // eliminated later on.
      set_control(fast_path);
      result = intcon(expected_val);
    }
    // Stop profiling.
    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
    // By replacing the method body with profile data (represented as a
    // ProfileBooleanNode at the IR level) we effectively disable profiling.
    // This enables full-speed execution once optimized code is generated.
    Node* profile = _gvn.transform(new (C) ProfileBooleanNode(result, false_cnt, true_cnt));
    C->record_for_igvn(profile);
    set_result(profile);
    return true;
  } else {
    // Continue profiling.
    // Profile data isn't available at the moment, so execute the method's
    // bytecode version. Usually, when GWT LambdaForms are profiled, it means
    // that a stand-alone nmethod is compiled and the counters aren't available,
    // since the corresponding MethodHandle isn't a compile-time constant.
    return false;
  }
}
