GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/graphKit.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
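//
// A minimal usage sketch (illustrative only, in the style of the
// PreserveJVMState example below; real clients are Parse and
// LibraryCallKit, `jvms' is a caller-supplied JVMState, and PROB_FAIR
// and COUNT_UNKNOWN are the usual opto probability/count constants):
//
//   GraphKit kit(jvms);
//   Node* len = kit.load_array_length(kit.argument(0));
//   Node* cmp = kit.CmpI(len, kit.intcon(0));
//   Node* tst = kit.Bool(cmp, BoolTest::gt);
//   IfNode* iff = kit.create_and_xform_if(kit.control(), tst,
//                                         PROB_FAIR, COUNT_UNKNOWN);
//   kit.set_control(kit.IfTrue(iff));  // continue along the "len > 0" path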
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse() const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*        env()  const { return _env; }
  PhaseGVN&     gvn()  const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null() const { return zerocon(T_OBJECT); }
  Node*         top()  const { return C->top(); }
  RootNode*     root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type *t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  // Helper for byte_map_base
  Node* byte_map_base_node() {
    // Get base of card map
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed via the StartNode enum constants
  // (Control, I_O, Memory, FramePtr, ReturnAdr).
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*     map()    const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()   const { return map_not_null()->_jvms; }
  int                sp()     const { return _sp; }
  int                bci()    const { return _bci; }
  Bytecodes::Code    java_bc() const;
  ciMethod*          method() const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp) { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)    { set_sp(sp() + i); }
  void dec_sp(int i)    { set_sp(sp() - i); }
  void set_bci(int bci) { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether an exception is currently saved in the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL) return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, returns the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);


  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C) IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C) IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C) AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C) SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C) MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C) DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C) AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C) OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C) XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C) MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C) MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C) URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C) CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C) CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C) CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C) BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C) AddPNode(b, a, o));    }

  // Convert between int and long, and size_t.
  // (See also the ConvI2X family of macros in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);


  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false, Node* *null_control = NULL);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type);
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true);
  }

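  // Example (illustrative only): null-check an object argument and
  // zero-check an int divisor before dividing; both helpers trap or
  // throw on failure, so the results may be used unconditionally:
  //
  //   Node* obj = null_check(argument(0));      // cast to not-null
  //   Node* d   = zero_check_int(argument(2));  // cast to non-zero
  //   Node* q   = DivI(control(), argument(1), d);
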
  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false, bool safe_for_replace = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
  Node* record_profiled_receiver_for_speculation(Node* n);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i+0));  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(n);       // T_INT, ...
    else if (n_size == 2)  push_pair(n);  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

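  // Example (illustrative only): two-word values occupy two stack slots,
  // with top() as the placeholder half, so a long round-trips like this:
  //
  //   push_pair(longcon(42L));  // pushes the value plus a top() placeholder
  //   Node* l = pop_pair();     // pops the placeholder, then the real value
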
  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control(Node* c)            { map_not_null()->set_control(c); }
  void set_i_o(Node* c)                { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false);

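  // Example (illustrative only; `obj' is a not-null oop in the current
  // map and `offset_in_bytes' is a hypothetical field offset):
  //
  //   Node* adr = basic_plus_adr(obj, obj, offset_in_bytes);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT,
  //                         MemNode::unordered);
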
  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched);
  }
  // This is the base version which is given alias index
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false);

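  // Example (illustrative only), mirroring the load example above with a
  // hypothetical `adr' and a matching `adr_type':
  //
  //   store_to_memory(control(), adr, intcon(0), T_INT, adr_type,
  //                   MemNode::unordered);
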
  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise for objects.  We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value.  QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise,
                  MemNode::MemOrd mo,
                  bool mismatched = false);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt,
                            MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt,
                             MemNode::MemOrd mo,
                             bool mismatched = false);

  // For the few cases where the barriers need special help
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

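  // Example (illustrative only): compute the address of ary[idx] for an
  // int[] array and feed it to the usual make_load factory:
  //
  //   Node* adr = array_element_address(ary, idx, T_INT);
  //   Node* elt = make_load(control(), adr, TypeInt::INT, T_INT,
  //                         MemNode::unordered);
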
  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from actual method being called (i.e., _linkTo* sites).
    // Use callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node* counter_addr);     // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

  // vanilla/CMS post barrier
  void write_barrier_post(Node* store, Node* obj,
                          Node* adr, uint adr_idx, Node* val, bool use_precise);

  // Allow reordering of pre-barrier with oop store and/or post-barrier.
  // Used for load_store operations which load the old value.
  bool can_move_pre_barrier() const;

  // G1 pre/post barriers
  void g1_write_barrier_pre(bool do_load,
                            Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            Node* pre_val,
                            BasicType bt);

  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  // Helper function for g1
 private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value: no flags set
  };

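  // Example (illustrative only): a leaf runtime call with one argument.
  // The call type, entry point, and `arg' are hypothetical; RC_LEAF and
  // RC_NO_FP are the flags declared above:
  //
  //   make_runtime_call(RC_LEAF | RC_NO_FP,
  //                     OptoRuntime::some_leaf_Type(),
  //                     CAST_FROM_FN_PTR(address, SharedRuntime::some_leaf),
  //                     "some_leaf", TypePtr::BOTTOM, arg);
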
  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc* slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node* subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast(Node* subobj, Node* superkls,
                      Node* *failure_control = NULL);

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

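  // Example (illustrative only): let the static check fold the dynamic
  // test away when both klasses are known at parse time:
  //
  //   switch (static_subtype_check(superk, subk)) {
  //   case SSC_always_true:  break;              // no runtime test needed
  //   case SSC_always_false: /* trap or fail */  break;
  //   default:               gen_subtype_check(subklass, superklass);
  //   }
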
  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

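  // Example (illustrative only): allocate `new int[n]', where `aklass'
  // is a hypothetical constant array-klass node and `n' came off the
  // stack (nargs = 1 lets a trap re-execute the bytecode correctly):
  //
  //   Node* arr = new_array(aklass, n, /*nargs=*/1);
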
  // java.lang.String helpers
  Node* load_String_offset(Node* ctrl, Node* str);
  Node* load_String_length(Node* ctrl, Node* str);
  Node* load_String_value(Node* ctrl, Node* str);
  void store_String_offset(Node* ctrl, Node* str, Node* value);
  void store_String_length(Node* ctrl, Node* str, Node* value);
  void store_String_value(Node* ctrl, Node* str, Node* value);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));  // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.transform(iff);  // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // Range-check and Null-check removal is later
    return iff;
  }

  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  // Produce new array node of stable type
  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

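// Typical use (illustrative only): guard a fast path with a test and
// trap on the failing side; `bol' is a hypothetical BoolNode and
// PROB_MAX is the usual opto probability constant:
//
// { BuildCutout unless(this, bol, PROB_MAX);
//   uncommon_trap(Deoptimization::Reason_intrinsic,
//                 Deoptimization::Action_maybe_recompile);
// }
// // here control continues on the path where the test succeeded
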
// Helper class to preserve the original _reexecute bit and _sp and
// restore them upon destruction.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_VM_OPTO_GRAPHKIT_HPP