Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/code/codeBlob.cpp
48785 views
1
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25
#include "precompiled.hpp"
26
#include "code/codeBlob.hpp"
27
#include "code/codeCache.hpp"
28
#include "code/relocInfo.hpp"
29
#include "compiler/disassembler.hpp"
30
#include "interpreter/bytecode.hpp"
31
#include "memory/allocation.inline.hpp"
32
#include "memory/heap.hpp"
33
#include "oops/oop.inline.hpp"
34
#include "prims/forte.hpp"
35
#include "runtime/handles.inline.hpp"
36
#include "runtime/interfaceSupport.hpp"
37
#include "runtime/mutexLocker.hpp"
38
#include "runtime/safepoint.hpp"
39
#include "runtime/sharedRuntime.hpp"
40
#include "runtime/vframe.hpp"
41
#include "services/memoryService.hpp"
42
#ifdef TARGET_ARCH_x86
43
# include "nativeInst_x86.hpp"
44
#endif
45
#ifdef TARGET_ARCH_aarch64
46
# include "nativeInst_aarch64.hpp"
47
#endif
48
#ifdef TARGET_ARCH_sparc
49
# include "nativeInst_sparc.hpp"
50
#endif
51
#ifdef TARGET_ARCH_zero
52
# include "nativeInst_zero.hpp"
53
#endif
54
#ifdef TARGET_ARCH_arm
55
# include "nativeInst_arm.hpp"
56
#endif
57
#ifdef TARGET_ARCH_ppc
58
# include "nativeInst_ppc.hpp"
59
#endif
60
#ifdef TARGET_ARCH_aarch32
61
# include "nativeInst_aarch32.hpp"
62
#endif
63
#ifdef COMPILER1
64
#include "c1/c1_Runtime1.hpp"
65
#endif
66
67
unsigned int CodeBlob::align_code_offset(int offset) {
68
// align the size to CodeEntryAlignment
69
return
70
((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
71
- (int)CodeHeap::header_size();
72
}
73
74
75
// This must be consistent with the CodeBlob constructor's layout actions.
76
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
77
unsigned int size = header_size;
78
size += round_to(cb->total_relocation_size(), oopSize);
79
// align the size to CodeEntryAlignment
80
size = align_code_offset(size);
81
size += round_to(cb->total_content_size(), oopSize);
82
size += round_to(cb->total_oop_size(), oopSize);
83
size += round_to(cb->total_metadata_size(), oopSize);
84
return size;
85
}
86
87
88
// Creates a simple CodeBlob. Sets up the size of the different regions.
89
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
90
assert(size == round_to(size, oopSize), "unaligned size");
91
assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
92
assert(header_size == round_to(header_size, oopSize), "unaligned size");
93
assert(!UseRelocIndex, "no space allocated for reloc index yet");
94
95
// Note: If UseRelocIndex is enabled, there needs to be (at least) one
96
// extra word for the relocation information, containing the reloc
97
// index table length. Unfortunately, the reloc index table imple-
98
// mentation is not easily understandable and thus it is not clear
99
// what exactly the format is supposed to be. For now, we just turn
100
// off the use of this table (gri 7/6/2000).
101
102
_name = name;
103
_size = size;
104
_frame_complete_offset = frame_complete;
105
_header_size = header_size;
106
_relocation_size = locs_size;
107
_content_offset = align_code_offset(header_size + _relocation_size);
108
_code_offset = _content_offset;
109
_data_offset = size;
110
_frame_size = 0;
111
set_oop_maps(NULL);
112
}
113
114
115
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
116
// and copy code and relocation info.
117
CodeBlob::CodeBlob(
118
const char* name,
119
CodeBuffer* cb,
120
int header_size,
121
int size,
122
int frame_complete,
123
int frame_size,
124
OopMapSet* oop_maps
125
) {
126
assert(size == round_to(size, oopSize), "unaligned size");
127
assert(header_size == round_to(header_size, oopSize), "unaligned size");
128
129
_name = name;
130
_size = size;
131
_frame_complete_offset = frame_complete;
132
_header_size = header_size;
133
_relocation_size = round_to(cb->total_relocation_size(), oopSize);
134
_content_offset = align_code_offset(header_size + _relocation_size);
135
_code_offset = _content_offset + cb->total_offset_of(cb->insts());
136
_data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
137
assert(_data_offset <= size, "codeBlob is too small");
138
139
cb->copy_code_and_locs_to(this);
140
set_oop_maps(oop_maps);
141
_frame_size = frame_size;
142
#ifdef COMPILER1
143
// probably wrong for tiered
144
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
145
#endif // COMPILER1
146
}
147
148
149
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big chunk of C-heap
  // memory; the owner is responsible for freeing it (see flush()).
  if (p == NULL) {
    _oop_maps = NULL;
    return;
  }
  // One chunk big enough to hold the OopMapSet and all of its OopMaps.
  _oop_maps = (OopMapSet*)NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
  p->copy_to((address)_oop_maps);
}
160
161
162
void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
  // Name formatting, disassembly and event posting must all happen
  // without the CodeCache lock held.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end());
      tty->cr();
    }
    // Make the stub visible to external profilers.
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // Fall back to the prefix when no suffix was supplied.
      const char* stub_name = (name2[0] != '\0') ? name2 : name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
188
189
190
void CodeBlob::flush() {
191
if (_oop_maps) {
192
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
193
_oop_maps = NULL;
194
}
195
_strings.free();
196
}
197
198
199
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(oop_maps() != NULL, "nope");
  // Look the map up by the pc's offset from the start of this blob's code.
  intptr_t pc_offset = (intptr_t)return_address - (intptr_t)code_begin();
  return oop_maps()->find_map_at_offset(pc_offset);
}
203
204
205
//----------------------------------------------------------------------------------------------------
206
// Implementation of BufferBlob
207
208
209
BufferBlob::BufferBlob(const char* name, int size)
  : CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{
  // Plain buffer: nothing beyond the CodeBlob layout to set up.
}
212
213
BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  assert(name != NULL, "must provide a name");

  // Header (aligned to CodeEntryAlignment) followed by the caller's buffer.
  unsigned int size = align_code_offset(sizeof(BufferBlob));
  size += round_to(buffer_size, oopSize);

  BufferBlob* blob = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
231
232
233
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{
  // Layout, code and relocations are all handled by the CodeBlob base ctor.
}
236
237
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  assert(name != NULL, "must provide a name");
  unsigned int size = allocation_size(cb, sizeof(BufferBlob));

  BufferBlob* blob = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
252
253
254
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
  // Placement allocation straight out of the code cache; may return NULL.
  return CodeCache::allocate(size, is_critical);
}
258
259
260
void BufferBlob::free( BufferBlob *blob ) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  // Release the blob's C-heap side data before returning its code-cache space.
  blob->flush();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((CodeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
270
271
272
//----------------------------------------------------------------------------------------------------
273
// Implementation of AdapterBlob
274
275
AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  // Adapters are registered with the code cache as soon as they exist.
  CodeCache::commit(this);
}
279
280
AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
  AdapterBlob* blob = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // The parameter 'true' indicates a critical memory allocation.
    // This means that CodeCacheMinimumFreeSpace is used, if necessary
    const bool is_critical = true;
    blob = new (size, is_critical) AdapterBlob(size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
297
298
VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
  // Nothing extra: a vtable blob is just a named buffer.
}
301
302
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  assert(name != NULL, "must provide a name");

  // Header (aligned to CodeEntryAlignment) followed by the vtable stubs.
  unsigned int size = align_code_offset(sizeof(VtableBlob));
  size += round_to(buffer_size, oopSize);

  VtableBlob* blob = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) VtableBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
320
321
//----------------------------------------------------------------------------------------------------
322
// Implementation of MethodHandlesAdapterBlob
323
324
MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  // Header (aligned to CodeEntryAlignment) followed by the adapter buffer.
  unsigned int size = align_code_offset(sizeof(MethodHandlesAdapterBlob));
  size += round_to(buffer_size, oopSize);

  MethodHandlesAdapterBlob* blob = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    // The parameter 'true' indicates a critical memory allocation.
    // This means that CodeCacheMinimumFreeSpace is used, if necessary
    const bool is_critical = true;
    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
344
345
346
//----------------------------------------------------------------------------------------------------
347
// Implementation of RuntimeStub
348
349
RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
{
  // Remember whether callers must keep their outgoing arguments GC-safe.
  _caller_must_gc_arguments = caller_must_gc_arguments;
}
362
363
364
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  // Registration/notification happens after the CodeCache lock is dropped.
  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}
383
384
385
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  // Runtime stubs are essential for VM operation, so treat the allocation
  // as critical and give up entirely if even that fails.
  void* p = CodeCache::allocate(size, true);
  if (p == NULL) fatal("Initial size of CodeCache is too small");
  return p;
}
390
391
// operator new shared by all singletons:
392
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
393
void* p = CodeCache::allocate(size, true);
394
if (!p) fatal("Initial size of CodeCache is too small");
395
return p;
396
}
397
398
399
//----------------------------------------------------------------------------------------------------
400
// Implementation of DeoptimizationBlob
401
402
DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  // Record the entry offsets of the various unpack sequences.
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;  // not set yet; see set_unpack_with_exception_in_tls_offset()
#endif
}
420
421
422
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  // Registration/notification happens after the CodeCache lock is dropped.
  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}
448
449
450
//----------------------------------------------------------------------------------------------------
451
// Implementation of UncommonTrapBlob
452
453
#ifdef COMPILER2
454
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{
  // All state lives in the SingletonBlob base.
}
462
463
464
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  // Registration/notification happens after the CodeCache lock is dropped.
  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}
481
482
483
#endif // COMPILER2
484
485
486
//----------------------------------------------------------------------------------------------------
487
// Implementation of ExceptionBlob
488
489
#ifdef COMPILER2
490
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{
  // All state lives in the SingletonBlob base.
}
498
499
500
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  // Registration/notification happens after the CodeCache lock is dropped.
  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}
517
518
519
#endif // COMPILER2
520
521
522
//----------------------------------------------------------------------------------------------------
523
// Implementation of SafepointBlob
524
525
SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{
  // All state lives in the SingletonBlob base.
}
533
534
535
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  // Registration/notification happens after the CodeCache lock is dropped.
  trace_new_stub(blob, "SafepointBlob");

  return blob;
}
552
553
554
//----------------------------------------------------------------------------------------------------
555
// Verification and printing
556
557
void CodeBlob::verify() {
558
ShouldNotReachHere();
559
}
560
561
void CodeBlob::print_on(outputStream* st) const {
  // Identity line plus the (possibly still-zero) frame size.
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}
565
566
void CodeBlob::print_value_on(outputStream* st) const {
  // Generic fallback; subclasses print something more specific.
  st->print_cr("[CodeBlob]");
}
569
570
void BufferBlob::verify() {
  // unimplemented
}
573
574
void BufferBlob::print_on(outputStream* st) const {
  // Base info first, then the one-line value description.
  CodeBlob::print_on(st);
  print_value_on(st);
}
578
579
void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}
582
583
void RuntimeStub::verify() {
  // unimplemented
}
586
587
void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;  // keep the multi-line dump from interleaving with other output
  CodeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this, st);
}
594
595
void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}
598
599
void SingletonBlob::verify() {
  // unimplemented
}
602
603
void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;  // keep the multi-line dump from interleaving with other output
  CodeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this, st);
}
609
610
void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}
613
614
void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}
617
618