GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/opto/graphKit.hpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*         _env;        // Compilation environment
  PhaseGVN&      _gvn;        // Some optimizations while parsing
  SafePointNode* _map;        // Parser map from JVM to Nodes
  SafePointNode* _exceptions; // Parser map(s) for exception state(s)
  int            _bci;        // JVM Bytecode Pointer
  ciMethod*      _method;     // JVM Current Method
  BarrierSetC2*  _barrier_set;

 private:
  int _sp; // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();               // empty constructor
  GraphKit(JVMState* jvms); // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*    env()               const { return _env; }
  PhaseGVN& gvn()               const { return _gvn; }
  void*     barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)   const { return _gvn.intcon(con); }
  Node* longcon(jlong con) const { return _gvn.longcon(con); }
  Node* integercon(jlong con, BasicType bt) const {
    if (bt == T_INT) {
      return intcon(checked_cast<jint>(con));
    }
    assert(bt == T_LONG, "basic type not an int or long");
    return longcon(con);
  }
  Node* makecon(const Type* t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

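  // Illustrative usage sketch (editorial addition, not part of the upstream
  // header); assuming `kit` is a GraphKit built on a valid JVMState, the
  // constant factories return GVN-canonicalized nodes, so repeated requests
  // for the same constant yield the same node:
  //
  //   Node* forty_two = kit.intcon(42);        // TypeInt constant
  //   Node* big       = kit.longcon(1L << 40); // TypeLong constant
  //   Node* nil       = kit.zerocon(T_OBJECT); // same node as kit.null()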
  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*  map()      const { return _map; }
  bool            has_exceptions() const { return _exceptions != NULL; }
  JVMState*       jvms()     const { return map_not_null()->_jvms; }
  int             sp()       const { return _sp; }
  int             bci()      const { return _bci; }
  Bytecodes::Code java_bc()  const;
  ciMethod*       method()   const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)    { set_sp(sp() + i); }
  void dec_sp(int i)    { set_sp(sp() - i); }
  void set_bci(int bci) { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state. (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether there is a saved exception oop in the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1); // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL) return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state. If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop. (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clears the current exception list and map, and returns the combined states.
  JVMState* transfer_exceptions_into_jvms();

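  // Illustrative usage sketch (editorial addition): a typical epilogue for a
  // kit that may hold pending exception states; `kit` and `call_jvms` are
  // assumed names:
  //
  //   kit.add_exception_states_from(call_jvms); // adopt callee exceptions
  //   JVMState* final_jvms = kit.transfer_exceptions_into_jvms();
  //   // the kit now holds no exceptions, satisfying the ~GraphKit() assert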
  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize. Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);

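  // Illustrative usage sketch (editorial addition): computing the address of
  // a field at a known byte offset; `kit`, `obj`, and the offset 16 are
  // hypothetical:
  //
  //   Node* field_adr = kit.basic_plus_adr(obj, 16); // AddP of obj + #16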

  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)               { return _gvn.transform(new IfTrueNode(iff)); }
  Node* IfFalse(IfNode* iff)              { return _gvn.transform(new IfFalseNode(iff)); }

  Node* AddI(Node* l, Node* r)            { return _gvn.transform(new AddINode(l, r)); }
  Node* SubI(Node* l, Node* r)            { return _gvn.transform(new SubINode(l, r)); }
  Node* MulI(Node* l, Node* r)            { return _gvn.transform(new MulINode(l, r)); }
  Node* DivI(Node* ctl, Node* l, Node* r) { return _gvn.transform(new DivINode(ctl, l, r)); }

  Node* AndI(Node* l, Node* r)            { return _gvn.transform(new AndINode(l, r)); }
  Node* OrI(Node* l, Node* r)             { return _gvn.transform(new OrINode(l, r)); }
  Node* XorI(Node* l, Node* r)            { return _gvn.transform(new XorINode(l, r)); }

  Node* MaxI(Node* l, Node* r)            { return _gvn.transform(new MaxINode(l, r)); }
  Node* MinI(Node* l, Node* r)            { return _gvn.transform(new MinINode(l, r)); }

  Node* LShiftI(Node* l, Node* r)         { return _gvn.transform(new LShiftINode(l, r)); }
  Node* RShiftI(Node* l, Node* r)         { return _gvn.transform(new RShiftINode(l, r)); }
  Node* URShiftI(Node* l, Node* r)        { return _gvn.transform(new URShiftINode(l, r)); }

  Node* CmpI(Node* l, Node* r)            { return _gvn.transform(new CmpINode(l, r)); }
  Node* CmpL(Node* l, Node* r)            { return _gvn.transform(new CmpLNode(l, r)); }
  Node* CmpP(Node* l, Node* r)            { return _gvn.transform(new CmpPNode(l, r)); }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)   { return _gvn.transform(new AddPNode(b, a, o)); }

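  // Illustrative usage sketch (editorial addition): every shortcut runs the
  // new node through GVN, so constant operands fold immediately; `kit` is
  // assumed:
  //
  //   Node* five = kit.AddI(kit.intcon(2), kit.intcon(3)); // folds to intcon(5)
  //   Node* cmp  = kit.CmpI(five, kit.intcon(0));
  //   Node* tst  = kit.Bool(cmp, BoolTest::gt);            // "five > 0"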
  // Convert between int and long, and size_t.
  // (See also macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Cast array allocation's length as narrow as possible.
  // If replace_length_in_map is true, replace length with CastIINode in map.
  // This method is invoked after creating/moving an AllocateArrayNode, or in load_array_length.
  Node* array_ideal_length(AllocateArrayNode* alloc,
                           const TypeOopPtr* oop_type,
                           bool replace_length_in_map);


  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = NULL,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
  }

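  // Illustrative usage sketch (editorial addition): the common intrinsic
  // prologue; `kit` is assumed, and using argument slot 0 as the receiver is
  // a hypothetical choice:
  //
  //   Node* recv = kit.null_check(kit.argument(0)); // traps on the null path
  //   if (kit.stopped()) return;                    // all paths were null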
  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop. Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  void guard_klass_being_initialized(Node* klass);
  void guard_init_thread(Node* klass);

  void clinit_barrier(ciInstanceKlass* ik, ciMethod* context);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 && // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top()); // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i+0)); // the real value
    assert(local(i+1) == top(), "");
    push(top()); // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(n);       // T_INT, ...
    else if (n_size == 2)  push_pair(n);  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

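  // Illustrative usage sketch (editorial addition): two-word values occupy
  // two expression-stack slots, with top() as the placeholder half; `kit`
  // and `lval` are assumed:
  //
  //   kit.push_pair(lval);          // push a long/double result
  //   Node* same = kit.pop_pair();  // pops the placeholder, then the value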
  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control(Node* c)            { map_not_null()->set_control(c); }
  void set_i_o(Node* c)                { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr* tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr* tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe, barrier_data);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe, barrier_data);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

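  // Illustrative usage sketch (editorial addition): an unordered int load
  // through a field address; `kit`, `obj`, and the offset 16 are
  // hypothetical:
  //
  //   Node* adr = kit.basic_plus_adr(obj, 16);
  //   Node* val = kit.make_load(kit.control(), adr, TypeInt::INT, T_INT,
  //                             MemNode::unordered);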
  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched, unsafe);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false);

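  // Illustrative usage sketch (editorial addition): storing an int back
  // through the same hypothetical address; the alias class is derived from
  // the address type:
  //
  //   const TypePtr* adr_type = kit.gvn().type(adr)->is_ptr();
  //   kit.store_to_memory(kit.control(), adr, val, T_INT, adr_type,
  //                       MemNode::unordered);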
  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src, Node* dst, Node* size, bool is_array);

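  // Illustrative usage sketch (editorial addition): a decorated in-heap load,
  // which lets the active GC's BarrierSetC2 expand whatever barriers it
  // needs; `kit`, `obj`, `adr`, and `adr_type` are assumed:
  //
  //   Node* v = kit.access_load_at(obj, adr, adr_type, TypeInt::INT, T_INT,
  //                                IN_HEAP | MO_UNORDERED);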
  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl);

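  // Illustrative usage sketch (editorial addition): addressing and loading
  // element `idx` of an int array; `kit`, `ary`, and `idx` are assumed:
  //
  //   Node* eadr = kit.array_element_address(ary, idx, T_INT);
  //   Node* elem = kit.make_load(kit.control(), eadr, TypeInt::INT, T_INT,
  //                              MemNode::unordered);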
  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char* name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from actual method being called (i.e., _linkTo* sites).
    // Use callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);

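  // Illustrative usage sketch (editorial addition): the three-step sequence
  // the comments above describe; `call` is an assumed CallJavaNode:
  //
  //   kit.set_arguments_for_java_call(call); // wire argument(0), argument(1), ...
  //   kit.set_edges_for_java_call(call);     // control, i_o, memory
  //   Node* ret = kit.set_results_for_java_call(call);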
  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);

  // Replace the call with the current state of the kit. Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // helper functions for statistics
  void increment_counter(address counter_addr); // increment a debug counter
  void increment_counter(Node* counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

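  // Illustrative usage sketch (editorial addition): bailing out when a type
  // check cannot be proven at compile time; the enumerators are real, the
  // context is hypothetical:
  //
  //   kit.uncommon_trap(Deoptimization::Reason_class_check,
  //                     Deoptimization::Action_maybe_recompile);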
  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  bool too_many_traps_or_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_traps_or_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);

  Node* sign_extend_byte(Node* in);
  Node* sign_extend_short(Node* in);

  Node* make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep);

  enum { // flag values for make_runtime_call
    RC_NO_FP = 1,       // CallLeafNoFPNode
    RC_NO_IO = 2,       // do not hook IO edges
    RC_NO_LEAF = 4,     // CallStaticJavaNode
    RC_MUST_THROW = 8,  // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16, // input memory is same as output
    RC_UNCOMMON = 32,   // freq. expected to be like uncommon trap
    RC_VECTOR = 64,     // CallLeafVectorNode
    RC_LEAF = 0         // null value: no flags set
  };

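  // Illustrative usage sketch (editorial addition): a leaf call with no
  // floating point and no IO edges; `some_call_type`, `some_entry_point`,
  // and `parm0` are hypothetical placeholders, not real OptoRuntime symbols:
  //
  //   Node* call = kit.make_runtime_call(RC_LEAF | RC_NO_FP | RC_NO_IO,
  //                                      some_call_type,   // const TypeFunc*
  //                                      some_entry_point, // address
  //                                      "example_leaf",
  //                                      TypePtr::BOTTOM,  // memory effects
  //                                      parm0);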
  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result, address slow_call,
                      const TypeFunc* slow_call_type, Node* slow_arg,
                      Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom. Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node* subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom. Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast(Node* subobj, Node* superkls,
                      Node* *failure_control = NULL);

  Node* gen_subtype_check(Node* obj, Node* superklass);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be cast to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff); // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff); // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff); // Range-check and Null-check removal is later
    return iff;
  }

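  // Illustrative usage sketch (editorial addition): a two-way branch built
  // from the helpers above; `kit` and `tst` (a BoolNode) are assumed:
  //
  //   IfNode* iff = kit.create_and_map_if(kit.control(), tst,
  //                                       PROB_LIKELY_MAG(3), COUNT_UNKNOWN);
  //   Node* taken     = kit.IfTrue(iff);  // control when tst is true
  //   Node* not_taken = kit.IfFalse(iff); // control when tst is false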
  void add_empty_predicates(int nargs = 0);
  void add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  // Vector API support (implemented in vectorIntrinsics.cpp)
  Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
  Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool shuffle_to_vector = false);
  Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned, and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block; // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp,
// and restore them on destruction.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                _kit;
  uint                     _sp;
  JVMState::ReexecuteState _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_OPTO_GRAPHKIT_HPP