Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/code/codeBlob.cpp
40930 views
1
/*
2
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "jvm.h"
27
#include "code/codeBlob.hpp"
28
#include "code/codeCache.hpp"
29
#include "code/icBuffer.hpp"
30
#include "code/relocInfo.hpp"
31
#include "code/vtableStubs.hpp"
32
#include "compiler/disassembler.hpp"
33
#include "compiler/oopMap.hpp"
34
#include "interpreter/bytecode.hpp"
35
#include "interpreter/interpreter.hpp"
36
#include "memory/allocation.inline.hpp"
37
#include "memory/heap.hpp"
38
#include "memory/resourceArea.hpp"
39
#include "oops/oop.inline.hpp"
40
#include "prims/forte.hpp"
41
#include "prims/jvmtiExport.hpp"
42
#include "runtime/handles.inline.hpp"
43
#include "runtime/interfaceSupport.inline.hpp"
44
#include "runtime/mutexLocker.hpp"
45
#include "runtime/safepoint.hpp"
46
#include "runtime/sharedRuntime.hpp"
47
#include "runtime/stubCodeGenerator.hpp"
48
#include "runtime/stubRoutines.hpp"
49
#include "runtime/vframe.hpp"
50
#include "services/memoryService.hpp"
51
#include "utilities/align.hpp"
52
#ifdef COMPILER1
53
#include "c1/c1_Runtime1.hpp"
54
#endif
55
56
const char* CodeBlob::compiler_name() const {
57
return compilertype2name(_type);
58
}
59
60
unsigned int CodeBlob::align_code_offset(int offset) {
61
// align the size to CodeEntryAlignment
62
int header_size = (int)CodeHeap::header_size();
63
return align_up(offset + header_size, CodeEntryAlignment) - header_size;
64
}
65
66
67
// This must be consistent with the CodeBlob constructor's layout actions.
68
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
69
unsigned int size = header_size;
70
size += align_up(cb->total_relocation_size(), oopSize);
71
// align the size to CodeEntryAlignment
72
size = align_code_offset(size);
73
size += align_up(cb->total_content_size(), oopSize);
74
size += align_up(cb->total_oop_size(), oopSize);
75
size += align_up(cb->total_metadata_size(), oopSize);
76
return size;
77
}
78
79
// Constructor variant that adopts an already-built ImmutableOopMapSet;
// code/relocations are assumed to be laid out as described by 'layout'.
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
  NOT_PRODUCT(COMMA _strings(CodeStrings()))
{
  // All section sizes are kept oop-aligned by allocation_size()/the layout.
  assert(is_aligned(layout.size(), oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}
107
108
// Constructor variant used when building from a CodeBuffer; the mutable
// OopMapSet is converted to an immutable copy via set_oop_maps().
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
  NOT_PRODUCT(COMMA _strings(CodeStrings()))
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  // Builds an immutable copy on the C heap; released in flush().
  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}
137
138
139
// Creates a simple CodeBlob. Sets up the size of the different regions.
140
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
141
: CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
142
{
143
assert(is_aligned(locs_size, oopSize), "unaligned size");
144
}
145
146
147
// Creates a RuntimeBlob from a CodeBuffer
148
// and copy code and relocation info.
149
RuntimeBlob::RuntimeBlob(
150
const char* name,
151
CodeBuffer* cb,
152
int header_size,
153
int size,
154
int frame_complete,
155
int frame_size,
156
OopMapSet* oop_maps,
157
bool caller_must_gc_arguments
158
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
159
cb->copy_code_and_locs_to(this);
160
}
161
162
void CodeBlob::flush() {
163
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
164
_oop_maps = NULL;
165
NOT_PRODUCT(_strings.free();)
166
}
167
168
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big chunk of C-heap memory
  // (an immutable copy of 'p'); it's the caller's job to free it (see flush()).
  _oop_maps = (p != NULL) ? ImmutableOopMapSet::build_from(p) : NULL;
}
177
178
179
// Announce a freshly generated stub: optionally disassemble it, register it
// with Forte, post the JVMTI dynamic-code-generated event, and update the
// code cache memory statistics. 'name1'/'name2' are concatenated into the id.
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty);
      if ((stub->oop_maps() != NULL) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // Prefer the specific name; fall back to the prefix when name2 is empty.
      const char* stub_name = (name2[0] == '\0') ? name1 : name2;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
211
212
// Look up the oop map covering the given return address inside this blob.
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(_oop_maps != NULL, "nope");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}
216
217
void CodeBlob::print_code() {
218
ResourceMark m;
219
Disassembler::decode(this, tty);
220
}
221
222
//----------------------------------------------------------------------------------------------------
223
// Implementation of BufferBlob
224
225
226
// Plain buffer blob: no code copied in, frame is never safe to walk.
BufferBlob::BufferBlob(const char* name, int size)
  : RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}
229
230
// Allocate an empty BufferBlob of at least 'buffer_size' usable bytes.
// Returns NULL if the code cache allocation fails.
BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
248
249
250
// Buffer blob initialized with the code from 'cb'.
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}
253
254
// Allocate a BufferBlob sized for (and filled from) CodeBuffer 'cb'.
// Returns NULL if the code cache allocation fails.
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  const unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
269
270
// Placement allocation into the non-nmethod section of the code cache.
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
273
274
// Release a BufferBlob: flush its owned resources, then return its storage
// to the code cache under the CodeCache_lock.
void BufferBlob::free(BufferBlob *blob) {
  assert(blob != NULL, "caller must check for NULL");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((RuntimeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
285
286
287
//----------------------------------------------------------------------------------------------------
288
// Implementation of AdapterBlob
289
290
// Adapter blobs are committed to the code cache as soon as they are built.
AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}
294
295
// Allocate an AdapterBlob sized for CodeBuffer 'cb'; NULL on cache exhaustion.
AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  const unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
309
310
void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}
319
320
// A vtable stub container; just an empty BufferBlob of the requested size.
VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}
323
324
// Allocate a VtableBlob without ever blocking on the CodeCache_lock; returns
// NULL (caller bails out of the IC transition) when the lock is contended.
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = NULL;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out the transition, and wait for a more opportune moment. Not only is it not
      // worth waiting for the lock blockingly for the megamorphic transition, it might
      // also result in a deadlock to blockingly wait, when concurrent class unloading is
      // performed. At this point in time, the CompiledICLocker is taken, so we are not
      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
      // consistently taken in the opposite order. Bailing out results in an IC transition to
      // the clean state instead, which will cause subsequent calls to retry the transitioning
      // eventually.
      return NULL;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
355
356
//----------------------------------------------------------------------------------------------------
357
// Implementation of MethodHandlesAdapterBlob
358
359
// Allocate a MethodHandlesAdapterBlob; exits the VM on code cache exhaustion
// because the runtime cannot operate without these adapters.
MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == NULL) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
379
380
//----------------------------------------------------------------------------------------------------
381
// Implementation of RuntimeStub
382
383
// A runtime stub carries generated code plus its frame/oop-map metadata.
RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}
395
396
// Factory: allocate and build a RuntimeStub from 'cb', then trace/register it.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  // Registration/JVMTI posting must happen after the lock is released.
  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}
415
416
417
// Runtime stubs are mandatory: failing to allocate one is fatal.
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}
422
423
// operator new shared by all singletons:
424
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
425
void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
426
if (!p) fatal("Initial size of CodeCache is too small");
427
return p;
428
}
429
430
431
//----------------------------------------------------------------------------------------------------
432
// Implementation of DeoptimizationBlob
433
434
// Records the entry-point offsets for the various deoptimization unpack paths.
DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;  // set later by C1 if applicable
#endif
}
452
453
454
// Factory for the singleton DeoptimizationBlob.
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}
480
481
482
//----------------------------------------------------------------------------------------------------
483
// Implementation of UncommonTrapBlob
484
485
#ifdef COMPILER2
486
// Singleton blob holding the C2 uncommon-trap handler code.
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}
494
495
496
// Factory for the singleton UncommonTrapBlob.
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}
513
514
515
#endif // COMPILER2
516
517
518
//----------------------------------------------------------------------------------------------------
519
// Implementation of ExceptionBlob
520
521
#ifdef COMPILER2
522
// Singleton blob holding the C2 exception handler code.
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}
530
531
532
// Factory for the singleton ExceptionBlob.
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}
549
550
551
#endif // COMPILER2
552
553
554
//----------------------------------------------------------------------------------------------------
555
// Implementation of SafepointBlob
556
557
// Singleton blob holding the safepoint polling handler code.
SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}
565
566
567
// Factory for the singleton SafepointBlob.
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}
584
585
586
//----------------------------------------------------------------------------------------------------
587
// Verification and printing
588
589
// Minimal description of a generic code blob: address and frame size.
void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}
593
594
void CodeBlob::print() const { print_on(tty); }  // convenience overload
595
596
// One-line description used in value-style listings.
void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}
599
600
// Describe what 'addr' points at inside this blob, trying progressively more
// specific owners (interpreter codelets, adapters, stubs, vtable stubs,
// nmethods) before falling back to a generic blob description.
void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != NULL) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // i2c/c2i adapters live in buffer blobs as well
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != NULL) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != NULL) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    nm->print_nmethod(verbose);
    return;
  }
  // Fallback: generic blob description.
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}
660
661
void RuntimeBlob::verify() {
662
ShouldNotReachHere();
663
}
664
665
void BufferBlob::verify() {
  // unimplemented
}
668
669
void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);  // generic blob header first
  print_value_on(st);
}
673
674
void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}
677
678
void RuntimeStub::verify() {
  // unimplemented
}
681
682
void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;  // keep the multi-line dump contiguous
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}
689
690
void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}
693
694
void SingletonBlob::verify() {
  // unimplemented
}
697
698
void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;  // keep the multi-line dump contiguous
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}
704
705
void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}
708
709
void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}
712
713
// Implementation of OptimizedEntryBlob
714
715
// Entry blob for optimized native (Panama) upcalls; committed immediately.
OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
                                       jobject receiver, ByteSize jfa_sp_offset) :
  BufferBlob(name, size, cb),
  _exception_handler_offset(exception_handler_offset),
  _receiver(receiver),
  _jfa_sp_offset(jfa_sp_offset) {
  CodeCache::commit(this);
}
723
724
// Allocate an OptimizedEntryBlob sized for 'cb'; nullptr on cache exhaustion.
OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
                                               jobject receiver, ByteSize jfa_sp_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  OptimizedEntryBlob* blob = nullptr;
  const unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, jfa_sp_offset);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
739
740