GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/code/compiledIC.hpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch32
# include "nativeInst_aarch32.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean  -->---  [1]
//        /        (null)            \
//       /                            \    /-<-\
//      /          [2]                 \  /     \
//  Interpreted  ---------> Monomorphic   |   [3]
//  (CompiledICHolder*)      (Klass*)     |
//       \                      /   \     /
//       [4] \                 / [4] \->-/
//            \->- Megamorphic -<-/
//              (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
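// Illustrative sketch (not part of the original header): how the transitions in the
// diagram above map onto the mutators declared on CompiledIC further down. Guarded
// with #if 0 so it never compiles; example_ic_transitions and its arguments are
// hypothetical names used for exposition only.
#if 0
void example_ic_transitions(CompiledIC* ic, CompiledICInfo& info,
                            CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  ic->set_to_clean();            // back to Clean (the (null) state in the diagram)
  ic->set_to_monomorphic(info);  // Clean -> Monomorphic: transitions [1] and [2]
  // Monomorphic/Clean -> Megamorphic: transition [4]. Takes TRAPS because it may
  // allocate in the code cache; returns false if that allocation fails.
  bool ok = ic->set_to_megamorphic(call_info, bytecode, CHECK);
  if (!ok) {
    // code cache allocation failed; the inline cache is left unchanged
  }
}
#endif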
class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;           // entry point for call
  void*   _cached_value;    // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;     // Is the cached value a CompiledICHolder*
  bool    _is_optimized;    // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;  // Call it to interpreter
  bool    _release_icholder;
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const   { return _is_optimized; }
  bool to_interpreter() const { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};

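// Illustrative sketch (not part of the original header): the ownership hand-off that
// claim_cached_icholder() implements. If nobody claims the CompiledICHolder*, the
// destructor above deletes it. Guarded with #if 0; example_icholder_handoff is a
// hypothetical name.
#if 0
void example_icholder_handoff(CompiledICInfo& info) {
  if (!info.is_optimized() && info.to_interpreter()) {
    // Take ownership: this clears the release flag, so ~CompiledICInfo()
    // will no longer delete the holder.
    CompiledICHolder* holder = info.claim_cached_icholder();
    // ... holder is now installed in (and owned by) the inline cache ...
  }
}  // if never claimed, ~CompiledICInfo() releases the holder here
#endif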
class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCall*        _ic_call;       // the call instruction
  NativeMovConstReg* _value;         // patchable value cell for this IC
  bool               _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no transition stub
  // is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The callsite might contain other stale
  // values of other kinds so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

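// Illustrative sketch (not part of the original header): the typical pairing of
// compute_monomorphic_entry with set_to_monomorphic during call-site resolution,
// similar in spirit to what SharedRuntime does. Guarded with #if 0;
// example_resolve_monomorphic and its arguments are hypothetical.
#if 0
void example_resolve_monomorphic(CompiledIC* ic, methodHandle callee,
                                 KlassHandle receiver_klass, bool is_optimized, TRAPS) {
  CompiledICInfo info;  // StackObj; its destructor releases an unclaimed CompiledICHolder*
  CompiledIC::compute_monomorphic_entry(callee, receiver_klass, is_optimized,
                                        /*static_bound*/ false, info, CHECK);
  ic->set_to_monomorphic(info);  // MT-safe: may route through a transition stub
}
#endif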
inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
         reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}

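// Illustrative sketch (not part of the original header): walking an nmethod's
// relocations and wrapping each virtual call site with CompiledIC_at, roughly as
// HotSpot's IC-cleaning code does. Guarded with #if 0; example_clean_ics is a
// hypothetical name, and the locking shown is an assumption (see the
// CompiledIC_lock note in the class above).
#if 0
void example_clean_ics(nmethod* nm) {
  MutexLocker ml(CompiledIC_lock);  // is_clean()/set_to_clean() require the lock
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type ||
        iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);  // also verifies the IC
      if (!ic->is_clean()) {
        ic->set_to_clean();
      }
    }
  }
}
#endif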
//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in the compiled code.
//
// The transition diagram of a static call site is somewhat simpler than for an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
// Clean:            Calls directly to runtime method for fixup
// Compiled code:    Calls directly to compiled code
// Interpreted code: Calls to stub that sets Method* reference
//
//
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry()  const { return _entry; }
  methodHandle callee() const { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
#if defined(AARCH64) && !defined(ZERO)
  static address emit_to_interp_stub(CodeBuffer &cbuf, address mark);
#else
  static address emit_to_interp_stub(CodeBuffer &cbuf);
#endif
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};


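// Illustrative sketch (not part of the original header): the compute_entry/set
// split described above, used as a resolution step for a clean static call site.
// Guarded with #if 0; example_resolve_static and its arguments are hypothetical.
#if 0
void example_resolve_static(CompiledStaticCall* csc, methodHandle callee) {
  StaticCallInfo info;
  CompiledStaticCall::compute_entry(callee, info);  // picks compiled vs. interpreted entry
  if (csc->is_clean()) {
    csc->set(info);  // patches the call instruction to info.entry()
  }
}
#endif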
inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP