GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/code/codeCache.cpp
1
/*
2
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "jvm_io.h"
27
#include "code/codeBlob.hpp"
28
#include "code/codeCache.hpp"
29
#include "code/codeHeapState.hpp"
30
#include "code/compiledIC.hpp"
31
#include "code/dependencies.hpp"
32
#include "code/dependencyContext.hpp"
33
#include "code/icBuffer.hpp"
34
#include "code/nmethod.hpp"
35
#include "code/pcDesc.hpp"
36
#include "compiler/compilationPolicy.hpp"
37
#include "compiler/compileBroker.hpp"
38
#include "compiler/oopMap.hpp"
39
#include "gc/shared/collectedHeap.hpp"
40
#include "jfr/jfrEvents.hpp"
41
#include "logging/log.hpp"
42
#include "logging/logStream.hpp"
43
#include "memory/allocation.inline.hpp"
44
#include "memory/iterator.hpp"
45
#include "memory/resourceArea.hpp"
46
#include "memory/universe.hpp"
47
#include "oops/method.inline.hpp"
48
#include "oops/objArrayOop.hpp"
49
#include "oops/oop.inline.hpp"
50
#include "oops/verifyOopClosure.hpp"
51
#include "runtime/arguments.hpp"
52
#include "runtime/atomic.hpp"
53
#include "runtime/deoptimization.hpp"
54
#include "runtime/globals_extension.hpp"
55
#include "runtime/handles.inline.hpp"
56
#include "runtime/icache.hpp"
57
#include "runtime/java.hpp"
58
#include "runtime/mutexLocker.hpp"
59
#include "runtime/safepointVerifiers.hpp"
60
#include "runtime/sweeper.hpp"
61
#include "runtime/vmThread.hpp"
62
#include "services/memoryService.hpp"
63
#include "utilities/align.hpp"
64
#include "utilities/vmError.hpp"
65
#include "utilities/xmlstream.hpp"
66
#ifdef COMPILER1
67
#include "c1/c1_Compilation.hpp"
68
#include "c1/c1_Compiler.hpp"
69
#endif
70
#ifdef COMPILER2
71
#include "opto/c2compiler.hpp"
72
#include "opto/compile.hpp"
73
#include "opto/node.hpp"
74
#endif
75
76
// Helper class for printing in CodeCache
77
class CodeBlob_sizes {
78
private:
79
int count;
80
int total_size;
81
int header_size;
82
int code_size;
83
int stub_size;
84
int relocation_size;
85
int scopes_oop_size;
86
int scopes_metadata_size;
87
int scopes_data_size;
88
int scopes_pcs_size;
89
90
public:
91
CodeBlob_sizes() {
92
count = 0;
93
total_size = 0;
94
header_size = 0;
95
code_size = 0;
96
stub_size = 0;
97
relocation_size = 0;
98
scopes_oop_size = 0;
99
scopes_metadata_size = 0;
100
scopes_data_size = 0;
101
scopes_pcs_size = 0;
102
}
103
104
int total() { return total_size; }
105
bool is_empty() { return count == 0; }
106
107
void print(const char* title) {
108
tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
109
count,
110
title,
111
(int)(total() / K),
112
header_size * 100 / total_size,
113
relocation_size * 100 / total_size,
114
code_size * 100 / total_size,
115
stub_size * 100 / total_size,
116
scopes_oop_size * 100 / total_size,
117
scopes_metadata_size * 100 / total_size,
118
scopes_data_size * 100 / total_size,
119
scopes_pcs_size * 100 / total_size);
120
}
121
122
void add(CodeBlob* cb) {
123
count++;
124
total_size += cb->size();
125
header_size += cb->header_size();
126
relocation_size += cb->relocation_size();
127
if (cb->is_nmethod()) {
128
nmethod* nm = cb->as_nmethod_or_null();
129
code_size += nm->insts_size();
130
stub_size += nm->stub_size();
131
132
scopes_oop_size += nm->oops_size();
133
scopes_metadata_size += nm->metadata_size();
134
scopes_data_size += nm->scopes_data_size();
135
scopes_pcs_size += nm->scopes_pcs_size();
136
} else {
137
code_size += cb->code_size();
138
}
139
}
140
};
141
142
// Iterate over all CodeHeaps
143
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
144
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
145
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
146
147
// Iterate over all CodeBlobs (cb) on the given CodeHeap
148
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
149
150
address CodeCache::_low_bound = 0;
151
address CodeCache::_high_bound = 0;
152
int CodeCache::_number_of_nmethods_with_dependencies = 0;
153
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
154
155
// Initialize arrays of CodeHeap subsets
156
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
157
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
158
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
159
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
160
161
void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
162
size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
163
// Prepare error message
164
const char* error = "Invalid code heap sizes";
165
err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
166
" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
167
non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
168
169
if (total_size > cache_size) {
170
// Some code heap sizes were explicitly set: total_size must be <= cache_size
171
message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
172
vm_exit_during_initialization(error, message);
173
} else if (all_set && total_size != cache_size) {
174
// All code heap sizes were explicitly set: total_size must equal cache_size
175
message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
176
vm_exit_during_initialization(error, message);
177
}
178
}
179
180
void CodeCache::initialize_heaps() {
181
bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
182
bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
183
bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
184
size_t min_size = os::vm_page_size();
185
size_t cache_size = ReservedCodeCacheSize;
186
size_t non_nmethod_size = NonNMethodCodeHeapSize;
187
size_t profiled_size = ProfiledCodeHeapSize;
188
size_t non_profiled_size = NonProfiledCodeHeapSize;
189
// Check if total size set via command line flags exceeds the reserved size
190
check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
191
(profiled_set ? profiled_size : min_size),
192
(non_profiled_set ? non_profiled_size : min_size),
193
cache_size,
194
non_nmethod_set && profiled_set && non_profiled_set);
195
196
// Determine size of compiler buffers
197
size_t code_buffers_size = 0;
198
#ifdef COMPILER1
199
// C1 temporary code buffers (see Compiler::init_buffer_blob())
200
const int c1_count = CompilationPolicy::c1_count();
201
code_buffers_size += c1_count * Compiler::code_buffer_size();
202
#endif
203
#ifdef COMPILER2
204
// C2 scratch buffers (see Compile::init_scratch_buffer_blob())
205
const int c2_count = CompilationPolicy::c2_count();
206
// Initial size of constant table (this may be increased if a compiled method needs more space)
207
code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
208
#endif
209
210
// Increase default non_nmethod_size to account for compiler buffers
211
if (!non_nmethod_set) {
212
non_nmethod_size += code_buffers_size;
213
}
214
// Calculate default CodeHeap sizes if not set by user
215
if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
216
// Check if we have enough space for the non-nmethod code heap
217
if (cache_size > non_nmethod_size) {
218
// Use the default value for non_nmethod_size and one half of the
219
// remaining size for non-profiled and one half for profiled methods
220
size_t remaining_size = cache_size - non_nmethod_size;
221
profiled_size = remaining_size / 2;
222
non_profiled_size = remaining_size - profiled_size;
223
} else {
224
// Use all space for the non-nmethod heap and set other heaps to minimal size
225
non_nmethod_size = cache_size - 2 * min_size;
226
profiled_size = min_size;
227
non_profiled_size = min_size;
228
}
229
} else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
230
// The user explicitly set some code heap sizes. Increase or decrease the (default)
231
// sizes of the other code heaps accordingly. First adapt non-profiled and profiled
232
// code heap sizes and then only change non-nmethod code heap size if still necessary.
233
intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
234
if (non_profiled_set) {
235
if (!profiled_set) {
236
// Adapt size of profiled code heap
237
if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
238
// Not enough space available, set to minimum size
239
diff_size += profiled_size - min_size;
240
profiled_size = min_size;
241
} else {
242
profiled_size += diff_size;
243
diff_size = 0;
244
}
245
}
246
} else if (profiled_set) {
247
// Adapt size of non-profiled code heap
248
if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
249
// Not enough space available, set to minimum size
250
diff_size += non_profiled_size - min_size;
251
non_profiled_size = min_size;
252
} else {
253
non_profiled_size += diff_size;
254
diff_size = 0;
255
}
256
} else if (non_nmethod_set) {
257
// Distribute remaining size between profiled and non-profiled code heaps
258
diff_size = cache_size - non_nmethod_size;
259
profiled_size = diff_size / 2;
260
non_profiled_size = diff_size - profiled_size;
261
diff_size = 0;
262
}
263
if (diff_size != 0) {
264
// Use non-nmethod code heap for remaining space requirements
265
assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
266
non_nmethod_size += diff_size;
267
}
268
}
269
270
// We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
271
if (!heap_available(CodeBlobType::MethodProfiled)) {
272
non_profiled_size += profiled_size;
273
profiled_size = 0;
274
}
275
// We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
276
if (!heap_available(CodeBlobType::MethodNonProfiled)) {
277
non_nmethod_size += non_profiled_size;
278
non_profiled_size = 0;
279
}
280
// Make sure we have enough space for VM internal code
281
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
282
if (non_nmethod_size < min_code_cache_size) {
283
vm_exit_during_initialization(err_msg(
284
"Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
285
non_nmethod_size/K, min_code_cache_size/K));
286
}
287
288
// Verify sizes and update flag values
289
assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
290
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
291
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
292
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
293
294
// If large page support is enabled, align code heaps according to large
295
// page size to make sure that code cache is covered by large pages.
296
const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
297
non_nmethod_size = align_up(non_nmethod_size, alignment);
298
profiled_size = align_down(profiled_size, alignment);
299
300
// Reserve one continuous chunk of memory for CodeHeaps and split it into
301
// parts for the individual heaps. The memory layout looks like this:
302
// ---------- high -----------
303
// Non-profiled nmethods
304
// Profiled nmethods
305
// Non-nmethods
306
// ---------- low ------------
307
ReservedCodeSpace rs = reserve_heap_memory(cache_size);
308
ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
309
ReservedSpace rest = rs.last_part(non_nmethod_size);
310
ReservedSpace profiled_space = rest.first_part(profiled_size);
311
ReservedSpace non_profiled_space = rest.last_part(profiled_size);
312
313
// Non-nmethods (stubs, adapters, ...)
314
add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
315
// Tier 2 and tier 3 (profiled) methods
316
add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
317
// Tier 1 and tier 4 (non-profiled) methods and native methods
318
add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
319
}
320
321
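// Determine the page size to use for the code cache. Prefers large pages when the OS supports
// executable large pages, while still allowing an incremental commit of the reserved space.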
size_t CodeCache::page_size(bool aligned, size_t min_pages) {
322
if (os::can_execute_large_page_memory()) {
323
if (InitialCodeCacheSize < ReservedCodeCacheSize) {
324
// Make sure that the page size allows for an incremental commit of the reserved space
325
min_pages = MAX2(min_pages, (size_t)8);
326
}
327
return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
328
os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
329
} else {
330
return os::vm_page_size();
331
}
332
}
333
334
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
335
// Align and reserve space for code cache
336
const size_t rs_ps = page_size();
337
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
338
const size_t rs_size = align_up(size, rs_align);
339
ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
340
if (!rs.is_reserved()) {
341
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
342
rs_size/K));
343
}
344
345
// Initialize bounds
346
_low_bound = (address)rs.base();
347
_high_bound = _low_bound + rs.size();
348
return rs;
349
}
350
351
// Heaps available for allocation
352
bool CodeCache::heap_available(int code_blob_type) {
353
if (!SegmentedCodeCache) {
354
// No segmentation: use a single code heap
355
return (code_blob_type == CodeBlobType::All);
356
} else if (Arguments::is_interpreter_only()) {
357
// Interpreter only: we don't need any method code heaps
358
return (code_blob_type == CodeBlobType::NonNMethod);
359
} else if (CompilerConfig::is_c1_profiling()) {
360
// Tiered compilation: use all code heaps
361
return (code_blob_type < CodeBlobType::All);
362
} else {
363
// No TieredCompilation: we only need the non-nmethod and non-profiled code heap
364
return (code_blob_type == CodeBlobType::NonNMethod) ||
365
(code_blob_type == CodeBlobType::MethodNonProfiled);
366
}
367
}
368
369
const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
370
switch(code_blob_type) {
371
case CodeBlobType::NonNMethod:
372
return "NonNMethodCodeHeapSize";
373
break;
374
case CodeBlobType::MethodNonProfiled:
375
return "NonProfiledCodeHeapSize";
376
break;
377
case CodeBlobType::MethodProfiled:
378
return "ProfiledCodeHeapSize";
379
break;
380
}
381
ShouldNotReachHere();
382
return NULL;
383
}
384
385
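// Comparator for sorting CodeHeaps: order by code blob type first, then by address to keep the ordering stable.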
int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
386
if (lhs->code_blob_type() == rhs->code_blob_type()) {
387
return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
388
} else {
389
return lhs->code_blob_type() - rhs->code_blob_type();
390
}
391
}
392
393
void CodeCache::add_heap(CodeHeap* heap) {
394
assert(!Universe::is_fully_initialized(), "late heap addition?");
395
396
_heaps->insert_sorted<code_heap_compare>(heap);
397
398
int type = heap->code_blob_type();
399
if (code_blob_type_accepts_compiled(type)) {
400
_compiled_heaps->insert_sorted<code_heap_compare>(heap);
401
}
402
if (code_blob_type_accepts_nmethod(type)) {
403
_nmethod_heaps->insert_sorted<code_heap_compare>(heap);
404
}
405
if (code_blob_type_accepts_allocable(type)) {
406
_allocable_heaps->insert_sorted<code_heap_compare>(heap);
407
}
408
}
409
410
void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
411
// Check if heap is needed
412
if (!heap_available(code_blob_type)) {
413
return;
414
}
415
416
// Create CodeHeap
417
CodeHeap* heap = new CodeHeap(name, code_blob_type);
418
add_heap(heap);
419
420
// Reserve Space
421
size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
422
size_initial = align_up(size_initial, os::vm_page_size());
423
if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
424
vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
425
heap->name(), size_initial/K));
426
}
427
428
// Register the CodeHeap
429
MemoryService::add_code_heap_memory_pool(heap, name);
430
}
431
432
CodeHeap* CodeCache::get_code_heap_containing(void* start) {
433
FOR_ALL_HEAPS(heap) {
434
if ((*heap)->contains(start)) {
435
return *heap;
436
}
437
}
438
return NULL;
439
}
440
441
CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
442
assert(cb != NULL, "CodeBlob is null");
443
FOR_ALL_HEAPS(heap) {
444
if ((*heap)->contains_blob(cb)) {
445
return *heap;
446
}
447
}
448
ShouldNotReachHere();
449
return NULL;
450
}
451
452
CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
453
FOR_ALL_HEAPS(heap) {
454
if ((*heap)->accepts(code_blob_type)) {
455
return *heap;
456
}
457
}
458
return NULL;
459
}
460
461
CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
462
assert_locked_or_safepoint(CodeCache_lock);
463
assert(heap != NULL, "heap is null");
464
return (CodeBlob*)heap->first();
465
}
466
467
CodeBlob* CodeCache::first_blob(int code_blob_type) {
468
if (heap_available(code_blob_type)) {
469
return first_blob(get_code_heap(code_blob_type));
470
} else {
471
return NULL;
472
}
473
}
474
475
CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
476
assert_locked_or_safepoint(CodeCache_lock);
477
assert(heap != NULL, "heap is null");
478
return (CodeBlob*)heap->next(cb);
479
}
480
481
/**
482
* Do not seize the CodeCache lock here--if the caller has not
483
* already done so, we are going to lose bigtime, since the code
484
* cache will contain a garbage CodeBlob until the caller can
485
* run the constructor for the CodeBlob subclass he is busy
486
* instantiating.
487
*/
488
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
489
// Possibly wakes up the sweeper thread.
490
NMethodSweeper::report_allocation();
491
assert_locked_or_safepoint(CodeCache_lock);
492
assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
493
if (size <= 0) {
494
return NULL;
495
}
496
CodeBlob* cb = NULL;
497
498
// Get CodeHeap for the given CodeBlobType
499
CodeHeap* heap = get_code_heap(code_blob_type);
500
assert(heap != NULL, "heap is null");
501
502
while (true) {
503
cb = (CodeBlob*)heap->allocate(size);
504
if (cb != NULL) break;
505
if (!heap->expand_by(CodeCacheExpansionSize)) {
506
// Save original type for error reporting
507
if (orig_code_blob_type == CodeBlobType::All) {
508
orig_code_blob_type = code_blob_type;
509
}
510
// Expansion failed
511
if (SegmentedCodeCache) {
512
// Fallback solution: Try to store code in another code heap.
513
// NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
514
// Note that in the sweeper, we check the reverse_free_ratio of the code heap
515
// and force stack scanning if less than 10% of the entire code cache is free.
516
int type = code_blob_type;
517
switch (type) {
518
case CodeBlobType::NonNMethod:
519
type = CodeBlobType::MethodNonProfiled;
520
break;
521
case CodeBlobType::MethodNonProfiled:
522
type = CodeBlobType::MethodProfiled;
523
break;
524
case CodeBlobType::MethodProfiled:
525
// Avoid loop if we already tried that code heap
526
if (type == orig_code_blob_type) {
527
type = CodeBlobType::MethodNonProfiled;
528
}
529
break;
530
}
531
if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
532
if (PrintCodeCacheExtension) {
533
tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
534
heap->name(), get_code_heap(type)->name());
535
}
536
return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
537
}
538
}
539
if (handle_alloc_failure) {
540
MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
541
CompileBroker::handle_full_code_cache(orig_code_blob_type);
542
}
543
return NULL;
544
}
545
if (PrintCodeCacheExtension) {
546
ResourceMark rm;
547
if (_nmethod_heaps->length() >= 1) {
548
tty->print("%s", heap->name());
549
} else {
550
tty->print("CodeCache");
551
}
552
tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
553
(intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
554
(address)heap->high() - (address)heap->low_boundary());
555
}
556
}
557
print_trace("allocation", cb, size);
558
return cb;
559
}
560
561
void CodeCache::free(CodeBlob* cb) {
562
assert_locked_or_safepoint(CodeCache_lock);
563
CodeHeap* heap = get_code_heap(cb);
564
print_trace("free", cb);
565
if (cb->is_nmethod()) {
566
nmethod* ptr = (nmethod *)cb;
567
heap->set_nmethod_count(heap->nmethod_count() - 1);
568
if (ptr->has_dependencies()) {
569
_number_of_nmethods_with_dependencies--;
570
}
571
ptr->free_native_invokers();
572
}
573
if (cb->is_adapter_blob()) {
574
heap->set_adapter_count(heap->adapter_count() - 1);
575
}
576
577
// Get heap for given CodeBlob and deallocate
578
get_code_heap(cb)->deallocate(cb);
579
580
assert(heap->blob_count() >= 0, "sanity check");
581
}
582
583
void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
584
assert_locked_or_safepoint(CodeCache_lock);
585
guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
586
print_trace("free_unused_tail", cb);
587
588
// We also have to account for the extra space (i.e. header) used by the CodeBlob
589
// which provides the memory (see BufferBlob::create() in codeBlob.cpp).
590
used += CodeBlob::align_code_offset(cb->header_size());
591
592
// Get heap for given CodeBlob and deallocate its unused tail
593
get_code_heap(cb)->deallocate_tail(cb, used);
594
// Adjust the sizes of the CodeBlob
595
cb->adjust_size(used);
596
}
597
598
void CodeCache::commit(CodeBlob* cb) {
599
// this is called by nmethod::nmethod, which must already own CodeCache_lock
600
assert_locked_or_safepoint(CodeCache_lock);
601
CodeHeap* heap = get_code_heap(cb);
602
if (cb->is_nmethod()) {
603
heap->set_nmethod_count(heap->nmethod_count() + 1);
604
if (((nmethod *)cb)->has_dependencies()) {
605
_number_of_nmethods_with_dependencies++;
606
}
607
}
608
if (cb->is_adapter_blob()) {
609
heap->set_adapter_count(heap->adapter_count() + 1);
610
}
611
612
// flush the hardware I-cache
613
ICache::invalidate_range(cb->content_begin(), cb->content_size());
614
}
615
616
bool CodeCache::contains(void *p) {
617
// S390 uses contains() in current_frame(), which is used before
618
// code cache initialization if NativeMemoryTracking=detail is set.
619
S390_ONLY(if (_heaps == NULL) return false;)
620
// It should be ok to call contains without holding a lock.
621
FOR_ALL_HEAPS(heap) {
622
if ((*heap)->contains(p)) {
623
return true;
624
}
625
}
626
return false;
627
}
628
629
bool CodeCache::contains(nmethod *nm) {
630
return contains((void *)nm);
631
}
632
633
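// Returns true if the current thread is a Java thread that is currently inside AsyncGetCallTrace (ASGCT).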
static bool is_in_asgct() {
634
Thread* current_thread = Thread::current_or_null_safe();
635
return current_thread != NULL && current_thread->is_Java_thread() && current_thread->as_Java_thread()->in_asgct();
636
}
637
638
// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
639
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
640
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
641
CodeBlob* CodeCache::find_blob(void* start) {
642
CodeBlob* result = find_blob_unsafe(start);
643
// We could potentially look up non_entrant methods
644
bool is_zombie = result != NULL && result->is_zombie();
645
bool is_result_safe = !is_zombie || result->is_locked_by_vm() || VMError::is_error_reported();
646
guarantee(is_result_safe || is_in_asgct(), "unsafe access to zombie method");
647
// When in ASGCT the previous guarantee will pass for a zombie method but we still don't want that code blob returned in order
// to minimize the chance of accessing dead memory
649
return is_result_safe ? result : NULL;
650
}
651
652
// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
653
// what you are doing)
654
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
655
// NMT can walk the stack before code cache is created
656
if (_heaps != NULL) {
657
CodeHeap* heap = get_code_heap_containing(start);
658
if (heap != NULL) {
659
return heap->find_blob_unsafe(start);
660
}
661
}
662
return NULL;
663
}
664
665
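// Like find_blob(), but asserts that the returned CodeBlob is an nmethod.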
nmethod* CodeCache::find_nmethod(void* start) {
666
CodeBlob* cb = find_blob(start);
667
assert(cb->is_nmethod(), "did not find an nmethod");
668
return (nmethod*)cb;
669
}
670
671
void CodeCache::blobs_do(void f(CodeBlob* nm)) {
672
assert_locked_or_safepoint(CodeCache_lock);
673
FOR_ALL_HEAPS(heap) {
674
FOR_ALL_BLOBS(cb, *heap) {
675
f(cb);
676
}
677
}
678
}
679
680
void CodeCache::nmethods_do(void f(nmethod* nm)) {
681
assert_locked_or_safepoint(CodeCache_lock);
682
NMethodIterator iter(NMethodIterator::all_blobs);
683
while(iter.next()) {
684
f(iter.method());
685
}
686
}
687
688
void CodeCache::metadata_do(MetadataClosure* f) {
689
assert_locked_or_safepoint(CodeCache_lock);
690
NMethodIterator iter(NMethodIterator::only_alive);
691
while(iter.next()) {
692
iter.method()->metadata_do(f);
693
}
694
}
695
696
int CodeCache::alignment_unit() {
697
return (int)_heaps->first()->alignment_unit();
698
}
699
700
int CodeCache::alignment_offset() {
701
return (int)_heaps->first()->alignment_offset();
702
}
703
704
// Mark nmethods for unloading if they contain otherwise unreachable oops.
705
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
706
assert_locked_or_safepoint(CodeCache_lock);
707
UnloadingScope scope(is_alive);
708
CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
709
while(iter.next()) {
710
iter.method()->do_unloading(unloading_occurred);
711
}
712
}
713
714
void CodeCache::blobs_do(CodeBlobClosure* f) {
715
assert_locked_or_safepoint(CodeCache_lock);
716
FOR_ALL_ALLOCABLE_HEAPS(heap) {
717
FOR_ALL_BLOBS(cb, *heap) {
718
if (cb->is_alive()) {
719
f->do_code_blob(cb);
720
#ifdef ASSERT
721
if (cb->is_nmethod()) {
722
Universe::heap()->verify_nmethod((nmethod*)cb);
723
}
724
#endif //ASSERT
725
}
726
}
727
}
728
}
729
730
void CodeCache::verify_clean_inline_caches() {
731
#ifdef ASSERT
732
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
733
while(iter.next()) {
734
nmethod* nm = iter.method();
735
assert(!nm->is_unloaded(), "Tautology");
736
nm->verify_clean_inline_caches();
737
nm->verify();
738
}
739
#endif
740
}
741
742
void CodeCache::verify_icholder_relocations() {
743
#ifdef ASSERT
744
// make sure that we aren't leaking icholders
745
int count = 0;
746
FOR_ALL_HEAPS(heap) {
747
FOR_ALL_BLOBS(cb, *heap) {
748
CompiledMethod *nm = cb->as_compiled_method_or_null();
749
if (nm != NULL) {
750
count += nm->verify_icholder_relocations();
751
}
752
}
753
}
754
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
755
CompiledICHolder::live_count(), "must agree");
756
#endif
757
}
758
759
// Defer freeing of concurrently cleaned ExceptionCache entries until
760
// after a global handshake operation.
761
void CodeCache::release_exception_cache(ExceptionCache* entry) {
762
if (SafepointSynchronize::is_at_safepoint()) {
763
delete entry;
764
} else {
765
for (;;) {
766
ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
767
entry->set_purge_list_next(purge_list_head);
768
if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
769
break;
770
}
771
}
772
}
773
}
774
775
// Delete exception caches that have been concurrently unlinked,
776
// followed by a global handshake operation.
777
void CodeCache::purge_exception_caches() {
778
ExceptionCache* curr = _exception_cache_purge_list;
779
while (curr != NULL) {
780
ExceptionCache* next = curr->purge_list_next();
781
delete curr;
782
curr = next;
783
}
784
_exception_cache_purge_list = NULL;
785
}
786
787
uint8_t CodeCache::_unloading_cycle = 1;
788
789
void CodeCache::increment_unloading_cycle() {
790
// 2-bit value (see IsUnloadingState in nmethod.cpp for details)
791
// 0 is reserved for new methods.
792
_unloading_cycle = (_unloading_cycle + 1) % 4;
793
if (_unloading_cycle == 0) {
794
_unloading_cycle = 1;
795
}
796
}
797
798
CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
799
: _is_unloading_behaviour(is_alive)
800
{
801
_saved_behaviour = IsUnloadingBehaviour::current();
802
IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
803
increment_unloading_cycle();
804
DependencyContext::cleaning_start();
805
}
806
807
CodeCache::UnloadingScope::~UnloadingScope() {
808
IsUnloadingBehaviour::set_current(_saved_behaviour);
809
DependencyContext::cleaning_end();
810
}
811
812
void CodeCache::verify_oops() {
813
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
814
VerifyOopClosure voc;
815
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
816
while(iter.next()) {
817
nmethod* nm = iter.method();
818
nm->oops_do(&voc);
819
nm->verify_oop_relocations();
820
}
821
}
822
823
int CodeCache::blob_count(int code_blob_type) {
824
CodeHeap* heap = get_code_heap(code_blob_type);
825
return (heap != NULL) ? heap->blob_count() : 0;
826
}
827
828
int CodeCache::blob_count() {
829
int count = 0;
830
FOR_ALL_HEAPS(heap) {
831
count += (*heap)->blob_count();
832
}
833
return count;
834
}
835
836
int CodeCache::nmethod_count(int code_blob_type) {
837
CodeHeap* heap = get_code_heap(code_blob_type);
838
return (heap != NULL) ? heap->nmethod_count() : 0;
839
}
840
841
int CodeCache::nmethod_count() {
842
int count = 0;
843
FOR_ALL_NMETHOD_HEAPS(heap) {
844
count += (*heap)->nmethod_count();
845
}
846
return count;
847
}
848
849
int CodeCache::adapter_count(int code_blob_type) {
850
CodeHeap* heap = get_code_heap(code_blob_type);
851
return (heap != NULL) ? heap->adapter_count() : 0;
852
}
853
854
int CodeCache::adapter_count() {
855
int count = 0;
856
FOR_ALL_HEAPS(heap) {
857
count += (*heap)->adapter_count();
858
}
859
return count;
860
}
861
862
address CodeCache::low_bound(int code_blob_type) {
863
CodeHeap* heap = get_code_heap(code_blob_type);
864
return (heap != NULL) ? (address)heap->low_boundary() : NULL;
865
}
866
867
address CodeCache::high_bound(int code_blob_type) {
868
CodeHeap* heap = get_code_heap(code_blob_type);
869
return (heap != NULL) ? (address)heap->high_boundary() : NULL;
870
}
871
872
size_t CodeCache::capacity() {
873
size_t cap = 0;
874
FOR_ALL_ALLOCABLE_HEAPS(heap) {
875
cap += (*heap)->capacity();
876
}
877
return cap;
878
}
879
880
size_t CodeCache::unallocated_capacity(int code_blob_type) {
881
CodeHeap* heap = get_code_heap(code_blob_type);
882
return (heap != NULL) ? heap->unallocated_capacity() : 0;
883
}
884
885
size_t CodeCache::unallocated_capacity() {
886
size_t unallocated_cap = 0;
887
FOR_ALL_ALLOCABLE_HEAPS(heap) {
888
unallocated_cap += (*heap)->unallocated_capacity();
889
}
890
return unallocated_cap;
891
}
892
893
size_t CodeCache::max_capacity() {
894
size_t max_cap = 0;
895
FOR_ALL_ALLOCABLE_HEAPS(heap) {
896
max_cap += (*heap)->max_capacity();
897
}
898
return max_cap;
899
}
900
901
902
// Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
903
// is free, reverse_free_ratio() returns 4.
904
// Since the code heap for each type of code blob falls forward to the next
905
// type of code heap, return the reverse free ratio for the entire
906
// code cache.
907
double CodeCache::reverse_free_ratio() {
908
double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
909
double max = (double)max_capacity();
910
double result = max / unallocated;
911
assert (max >= unallocated, "Must be");
912
assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
913
return result;
914
}
915
916
size_t CodeCache::bytes_allocated_in_freelists() {
917
size_t allocated_bytes = 0;
918
FOR_ALL_ALLOCABLE_HEAPS(heap) {
919
allocated_bytes += (*heap)->allocated_in_freelist();
920
}
921
return allocated_bytes;
922
}
923
924
int CodeCache::allocated_segments() {
925
int number_of_segments = 0;
926
FOR_ALL_ALLOCABLE_HEAPS(heap) {
927
number_of_segments += (*heap)->allocated_segments();
928
}
929
return number_of_segments;
930
}
931
932
size_t CodeCache::freelists_length() {
933
size_t length = 0;
934
FOR_ALL_ALLOCABLE_HEAPS(heap) {
935
length += (*heap)->freelist_length();
936
}
937
return length;
938
}
939
940
void icache_init();
941
942
void CodeCache::initialize() {
943
assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
944
#ifdef COMPILER2
945
assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
946
#endif
947
assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
948
// This was originally just a check of the alignment that caused a failure; instead, round
// the code cache expansion size up to the page size. In particular, Solaris is moving to a larger
// default page size.
951
CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
952
953
if (SegmentedCodeCache) {
954
// Use multiple code heaps
955
initialize_heaps();
956
} else {
957
// Use a single code heap
958
FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
959
FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
960
FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
961
ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
962
add_heap(rs, "CodeCache", CodeBlobType::All);
963
}
964
965
// Initialize ICache flush mechanism
966
// This service is needed for os::register_code_area
967
icache_init();
968
969
// Give OS a chance to register generated code area.
970
// This is used on Windows 64 bit platforms to register
971
// Structured Exception Handlers for our generated code.
972
os::register_code_area((char*)low_bound(), (char*)high_bound());
973
}
974
975
void codeCache_init() {
976
CodeCache::initialize();
977
}
978
979
//------------------------------------------------------------------------------------------------
980
981
int CodeCache::number_of_nmethods_with_dependencies() {
982
return _number_of_nmethods_with_dependencies;
983
}
984
985
void CodeCache::clear_inline_caches() {
986
assert_locked_or_safepoint(CodeCache_lock);
987
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
988
while(iter.next()) {
989
iter.method()->clear_inline_caches();
990
}
991
}
992
993
void CodeCache::cleanup_inline_caches() {
994
assert_locked_or_safepoint(CodeCache_lock);
995
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
996
while(iter.next()) {
997
iter.method()->cleanup_inline_caches(/*clean_all=*/true);
998
}
999
}
1000
1001
// Keeps track of time spent for checking dependencies
1002
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1003
1004
int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
1005
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1006
int number_of_marked_CodeBlobs = 0;
1007
1008
// search the hierarchy looking for nmethods which are affected by the loading of this class
1009
1010
// then search the interfaces this class implements looking for nmethods
1011
// which might be dependent on the fact that an interface only had one
// implementor.
1013
// nmethod::check_all_dependencies only works correctly if no safepoint
// can happen
1015
NoSafepointVerifier nsv;
1016
for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1017
Klass* d = str.klass();
1018
number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
1019
}
1020
1021
#ifndef PRODUCT
1022
if (VerifyDependencies) {
1023
// Object pointers are used as unique identifiers for dependency arguments. This
1024
// is only possible if no safepoint, i.e., GC occurs during the verification code.
1025
dependentCheckTime.start();
1026
nmethod::check_all_dependencies(changes);
1027
dependentCheckTime.stop();
1028
}
1029
#endif
1030
1031
return number_of_marked_CodeBlobs;
1032
}
1033
1034
CompiledMethod* CodeCache::find_compiled(void* start) {
1035
CodeBlob *cb = find_blob(start);
1036
assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
1037
return (CompiledMethod*)cb;
1038
}
1039
1040
#if INCLUDE_JVMTI
1041
// RedefineClasses support for saving nmethods that are dependent on "old" methods.
1042
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
1043
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;
1044
1045
static void add_to_old_table(CompiledMethod* c) {
1046
if (old_compiled_method_table == NULL) {
1047
old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
1048
}
1049
old_compiled_method_table->push(c);
1050
}
1051
1052
static void reset_old_method_table() {
1053
if (old_compiled_method_table != NULL) {
1054
delete old_compiled_method_table;
1055
old_compiled_method_table = NULL;
1056
}
1057
}
1058
1059
// Remove this method when zombied or unloaded.
1060
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
1061
assert_lock_strong(CodeCache_lock);
1062
if (old_compiled_method_table != NULL) {
1063
int index = old_compiled_method_table->find(c);
1064
if (index != -1) {
1065
old_compiled_method_table->delete_at(index);
1066
}
1067
}
1068
}
1069
1070
void CodeCache::old_nmethods_do(MetadataClosure* f) {
1071
// Walk old method table and mark those on stack.
1072
int length = 0;
1073
if (old_compiled_method_table != NULL) {
1074
length = old_compiled_method_table->length();
1075
for (int i = 0; i < length; i++) {
1076
CompiledMethod* cm = old_compiled_method_table->at(i);
1077
// Only walk alive nmethods; the dead ones will get removed by the sweeper or GC.
1078
if (cm->is_alive() && !cm->is_unloading()) {
1079
old_compiled_method_table->at(i)->metadata_do(f);
1080
}
1081
}
1082
}
1083
log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
1084
}
1085
1086
// Just marks the methods in this class as needing deoptimization
1087
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
1088
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1089
}
1090
1091
1092
// Walk compiled methods and mark dependent methods for deoptimization.
1093
int CodeCache::mark_dependents_for_evol_deoptimization() {
1094
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1095
// Each redefinition creates a new set of nmethods that have references to "old" Methods
1096
// So delete old method table and create a new one.
1097
reset_old_method_table();
1098
1099
int number_of_marked_CodeBlobs = 0;
1100
CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
1101
while(iter.next()) {
1102
CompiledMethod* nm = iter.method();
1103
// Walk all alive nmethods to check for old Methods.
1104
// This includes methods whose inline caches point to old methods, so
1105
// inline cache clearing is unnecessary.
1106
if (nm->has_evol_metadata()) {
1107
nm->mark_for_deoptimization();
1108
add_to_old_table(nm);
1109
number_of_marked_CodeBlobs++;
1110
}
1111
}
1112
1113
// return total count of nmethods marked for deoptimization, if zero the caller
1114
// can skip deoptimization
1115
return number_of_marked_CodeBlobs;
1116
}
1117
1118
void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
1119
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1120
CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
1121
while(iter.next()) {
1122
CompiledMethod* nm = iter.method();
1123
if (!nm->method()->is_method_handle_intrinsic()) {
1124
nm->mark_for_deoptimization();
1125
if (nm->has_evol_metadata()) {
1126
add_to_old_table(nm);
1127
}
1128
}
1129
}
1130
}
1131
1132
// Flushes compiled methods dependent on redefined classes, that have already been
1133
// marked for deoptimization.
1134
void CodeCache::flush_evol_dependents() {
1135
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
1136
1137
// CodeCache can only be updated by a thread_in_VM and they will all be
1138
// stopped during the safepoint so CodeCache will be safe to update without
1139
// holding the CodeCache_lock.
1140
1141
// At least one nmethod has been marked for deoptimization
1142
1143
Deoptimization::deoptimize_all_marked();
1144
}
1145
#endif // INCLUDE_JVMTI
1146
1147
// Mark methods for deopt (if safe or possible).
1148
void CodeCache::mark_all_nmethods_for_deoptimization() {
1149
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1150
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1151
while(iter.next()) {
1152
CompiledMethod* nm = iter.method();
1153
if (!nm->is_native_method()) {
1154
nm->mark_for_deoptimization();
1155
}
1156
}
1157
}
1158
1159
int CodeCache::mark_for_deoptimization(Method* dependee) {
1160
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1161
int number_of_marked_CodeBlobs = 0;
1162
1163
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1164
while(iter.next()) {
1165
CompiledMethod* nm = iter.method();
1166
if (nm->is_dependent_on_method(dependee)) {
1167
ResourceMark rm;
1168
nm->mark_for_deoptimization();
1169
number_of_marked_CodeBlobs++;
1170
}
1171
}
1172
1173
return number_of_marked_CodeBlobs;
1174
}
1175
1176
void CodeCache::make_marked_nmethods_not_entrant() {
1177
assert_locked_or_safepoint(CodeCache_lock);
1178
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1179
while(iter.next()) {
1180
CompiledMethod* nm = iter.method();
1181
if (nm->is_marked_for_deoptimization()) {
1182
nm->make_not_entrant();
1183
}
1184
}
1185
}
1186
1187
// Flushes compiled methods dependent on dependee.
1188
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
1189
assert_lock_strong(Compile_lock);
1190
1191
if (number_of_nmethods_with_dependencies() == 0) return;
1192
1193
int marked = 0;
1194
if (dependee->is_linked()) {
1195
// Class initialization state change.
1196
KlassInitDepChange changes(dependee);
1197
marked = mark_for_deoptimization(changes);
1198
} else {
1199
// New class is loaded.
1200
NewKlassDepChange changes(dependee);
1201
marked = mark_for_deoptimization(changes);
1202
}
1203
1204
if (marked > 0) {
1205
// At least one nmethod has been marked for deoptimization
1206
Deoptimization::deoptimize_all_marked();
1207
}
1208
}
1209
1210
// Flushes compiled methods dependent on dependee
1211
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
1212
// --- Compile_lock is not held. However we are at a safepoint.
1213
assert_locked_or_safepoint(Compile_lock);
1214
1215
// Compute the dependent nmethods
1216
if (mark_for_deoptimization(m_h()) > 0) {
1217
Deoptimization::deoptimize_all_marked();
1218
}
1219
}
1220
1221
void CodeCache::verify() {
1222
assert_locked_or_safepoint(CodeCache_lock);
1223
FOR_ALL_HEAPS(heap) {
1224
(*heap)->verify();
1225
FOR_ALL_BLOBS(cb, *heap) {
1226
if (cb->is_alive()) {
1227
cb->verify();
1228
}
1229
}
1230
}
1231
}
1232
1233
// A CodeHeap is full. Print out warning and report event.
1234
PRAGMA_DIAG_PUSH
1235
PRAGMA_FORMAT_NONLITERAL_IGNORED
1236
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1237
// Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1238
CodeHeap* heap = get_code_heap(code_blob_type);
1239
assert(heap != NULL, "heap is null");
1240
1241
if ((heap->full_count() == 0) || print) {
1242
// Not yet reported for this heap, report
1243
if (SegmentedCodeCache) {
1244
ResourceMark rm;
1245
stringStream msg1_stream, msg2_stream;
1246
msg1_stream.print("%s is full. Compiler has been disabled.",
1247
get_code_heap_name(code_blob_type));
1248
msg2_stream.print("Try increasing the code heap size using -XX:%s=",
1249
get_code_heap_flag_name(code_blob_type));
1250
const char *msg1 = msg1_stream.as_string();
1251
const char *msg2 = msg2_stream.as_string();
1252
1253
log_warning(codecache)("%s", msg1);
1254
log_warning(codecache)("%s", msg2);
1255
warning("%s", msg1);
1256
warning("%s", msg2);
1257
} else {
1258
const char *msg1 = "CodeCache is full. Compiler has been disabled.";
1259
const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";
1260
1261
log_warning(codecache)("%s", msg1);
1262
log_warning(codecache)("%s", msg2);
1263
warning("%s", msg1);
1264
warning("%s", msg2);
1265
}
1266
ResourceMark rm;
1267
stringStream s;
1268
// Dump code cache into a buffer before locking the tty.
1269
{
1270
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1271
print_summary(&s);
1272
}
1273
{
1274
ttyLocker ttyl;
1275
tty->print("%s", s.as_string());
1276
}
1277
1278
if (heap->full_count() == 0) {
1279
if (PrintCodeHeapAnalytics) {
1280
CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
1281
}
1282
}
1283
}
1284
1285
heap->report_full();
1286
1287
EventCodeCacheFull event;
1288
if (event.should_commit()) {
1289
event.set_codeBlobType((u1)code_blob_type);
1290
event.set_startAddress((u8)heap->low_boundary());
1291
event.set_commitedTopAddress((u8)heap->high());
1292
event.set_reservedTopAddress((u8)heap->high_boundary());
1293
event.set_entryCount(heap->blob_count());
1294
event.set_methodCount(heap->nmethod_count());
1295
event.set_adaptorCount(heap->adapter_count());
1296
event.set_unallocatedCapacity(heap->unallocated_capacity());
1297
event.set_fullCount(heap->full_count());
1298
event.commit();
1299
}
1300
}
1301
PRAGMA_DIAG_POP
1302
1303
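// Print the memory wasted by segment-size rounding of CodeBlobs as well as freelist statistics.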
void CodeCache::print_memory_overhead() {
1304
size_t wasted_bytes = 0;
1305
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1306
CodeHeap* curr_heap = *heap;
1307
for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
1308
HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1309
wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1310
}
1311
}
1312
// Print bytes that are allocated in the freelist
1313
ttyLocker ttl;
1314
tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
1315
tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
1316
tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
1317
tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
1318
}
1319
1320
//------------------------------------------------------------------------------------------------
1321
// Non-product version
1322
1323
#ifndef PRODUCT
1324
1325
void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1326
if (PrintCodeCache2) { // Need to add a new flag
1327
ResourceMark rm;
1328
if (size == 0) size = cb->size();
1329
tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1330
}
1331
}
1332
1333
void CodeCache::print_internals() {
1334
int nmethodCount = 0;
1335
int runtimeStubCount = 0;
1336
int adapterCount = 0;
1337
int deoptimizationStubCount = 0;
1338
int uncommonTrapStubCount = 0;
1339
int bufferBlobCount = 0;
1340
int total = 0;
1341
int nmethodAlive = 0;
1342
int nmethodNotEntrant = 0;
1343
int nmethodZombie = 0;
1344
int nmethodUnloaded = 0;
1345
int nmethodJava = 0;
1346
int nmethodNative = 0;
1347
int max_nm_size = 0;
1348
ResourceMark rm;
1349
1350
int i = 0;
1351
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1352
if ((_nmethod_heaps->length() >= 1) && Verbose) {
1353
tty->print_cr("-- %s --", (*heap)->name());
1354
}
1355
FOR_ALL_BLOBS(cb, *heap) {
1356
total++;
1357
if (cb->is_nmethod()) {
1358
nmethod* nm = (nmethod*)cb;
1359
1360
if (Verbose && nm->method() != NULL) {
1361
ResourceMark rm;
1362
char *method_name = nm->method()->name_and_sig_as_C_string();
1363
tty->print("%s", method_name);
1364
if(nm->is_alive()) { tty->print_cr(" alive"); }
1365
if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1366
if(nm->is_zombie()) { tty->print_cr(" zombie"); }
1367
}
1368
1369
nmethodCount++;
1370
1371
if(nm->is_alive()) { nmethodAlive++; }
1372
if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1373
if(nm->is_zombie()) { nmethodZombie++; }
1374
if(nm->is_unloaded()) { nmethodUnloaded++; }
1375
if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
1376
1377
if(nm->method() != NULL && nm->is_java_method()) {
1378
nmethodJava++;
1379
max_nm_size = MAX2(max_nm_size, nm->size());
1380
}
1381
} else if (cb->is_runtime_stub()) {
1382
runtimeStubCount++;
1383
} else if (cb->is_deoptimization_stub()) {
1384
deoptimizationStubCount++;
1385
} else if (cb->is_uncommon_trap_stub()) {
1386
uncommonTrapStubCount++;
1387
} else if (cb->is_adapter_blob()) {
1388
adapterCount++;
1389
} else if (cb->is_buffer_blob()) {
1390
bufferBlobCount++;
1391
}
1392
}
1393
}
1394
1395
int bucketSize = 512;
1396
int bucketLimit = max_nm_size / bucketSize + 1;
1397
int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1398
memset(buckets, 0, sizeof(int) * bucketLimit);
1399
1400
NMethodIterator iter(NMethodIterator::all_blobs);
1401
while(iter.next()) {
1402
nmethod* nm = iter.method();
1403
if(nm->method() != NULL && nm->is_java_method()) {
1404
buckets[nm->size() / bucketSize]++;
1405
}
1406
}
1407
1408
tty->print_cr("Code Cache Entries (total of %d)",total);
1409
tty->print_cr("-------------------------------------------------");
1410
tty->print_cr("nmethods: %d",nmethodCount);
1411
tty->print_cr("\talive: %d",nmethodAlive);
1412
tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1413
tty->print_cr("\tzombie: %d",nmethodZombie);
1414
tty->print_cr("\tunloaded: %d",nmethodUnloaded);
1415
tty->print_cr("\tjava: %d",nmethodJava);
1416
tty->print_cr("\tnative: %d",nmethodNative);
1417
tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1418
tty->print_cr("adapters: %d",adapterCount);
1419
tty->print_cr("buffer blobs: %d",bufferBlobCount);
1420
tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1421
tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1422
tty->print_cr("\nnmethod size distribution (non-zombie java)");
1423
tty->print_cr("-------------------------------------------------");
1424
1425
for(int i=0; i<bucketLimit; i++) {
1426
if(buckets[i] != 0) {
1427
tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1428
tty->fill_to(40);
1429
tty->print_cr("%d",buckets[i]);
1430
}
1431
}
1432
1433
FREE_C_HEAP_ARRAY(int, buckets);
1434
print_memory_overhead();
1435
}
1436
1437
#endif // !PRODUCT
1438
1439
void CodeCache::print() {
1440
print_summary(tty);
1441
1442
#ifndef PRODUCT
1443
if (!Verbose) return;
1444
1445
CodeBlob_sizes live;
1446
CodeBlob_sizes dead;
1447
1448
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1449
FOR_ALL_BLOBS(cb, *heap) {
1450
if (!cb->is_alive()) {
1451
dead.add(cb);
1452
} else {
1453
live.add(cb);
1454
}
1455
}
1456
}
1457
1458
tty->print_cr("CodeCache:");
1459
tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1460
1461
if (!live.is_empty()) {
1462
live.print("live");
1463
}
1464
if (!dead.is_empty()) {
1465
dead.print("dead");
1466
}
1467
1468
if (WizardMode) {
1469
// print the oop_map usage
1470
int code_size = 0;
1471
int number_of_blobs = 0;
1472
int number_of_oop_maps = 0;
1473
int map_size = 0;
1474
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1475
FOR_ALL_BLOBS(cb, *heap) {
1476
if (cb->is_alive()) {
1477
number_of_blobs++;
1478
code_size += cb->code_size();
1479
ImmutableOopMapSet* set = cb->oop_maps();
1480
if (set != NULL) {
1481
number_of_oop_maps += set->count();
1482
map_size += set->nr_of_bytes();
1483
}
1484
}
1485
}
1486
}
1487
tty->print_cr("OopMaps");
1488
tty->print_cr(" #blobs = %d", number_of_blobs);
1489
tty->print_cr(" code size = %d", code_size);
1490
tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1491
tty->print_cr(" map size = %d", map_size);
1492
}
1493
1494
#endif // !PRODUCT
1495
}
1496
1497
void CodeCache::print_summary(outputStream* st, bool detailed) {
1498
int full_count = 0;
1499
FOR_ALL_HEAPS(heap_iterator) {
1500
CodeHeap* heap = (*heap_iterator);
1501
size_t total = (heap->high_boundary() - heap->low_boundary());
1502
if (_heaps->length() >= 1) {
1503
st->print("%s:", heap->name());
1504
} else {
1505
st->print("CodeCache:");
1506
}
1507
st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1508
"Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1509
total/K, (total - heap->unallocated_capacity())/K,
1510
heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1511
1512
if (detailed) {
1513
st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1514
p2i(heap->low_boundary()),
1515
p2i(heap->high()),
1516
p2i(heap->high_boundary()));
1517
1518
full_count += get_codemem_full_count(heap->code_blob_type());
1519
}
1520
}
1521
1522
if (detailed) {
1523
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1524
" adapters=" UINT32_FORMAT,
1525
blob_count(), nmethod_count(), adapter_count());
1526
st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1527
"enabled" : Arguments::mode() == Arguments::_int ?
1528
"disabled (interpreter mode)" :
1529
"disabled (not enough contiguous free space left)");
1530
st->print_cr(" stopped_count=%d, restarted_count=%d",
1531
CompileBroker::get_total_compiler_stopped_count(),
1532
CompileBroker::get_total_compiler_restarted_count());
1533
st->print_cr(" full_count=%d", full_count);
1534
}
1535
}
1536
1537
void CodeCache::print_codelist(outputStream* st) {
1538
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1539
1540
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1541
while (iter.next()) {
1542
CompiledMethod* cm = iter.method();
1543
ResourceMark rm;
1544
char* method_name = cm->method()->name_and_sig_as_C_string();
1545
st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1546
cm->compile_id(), cm->comp_level(), cm->get_state(),
1547
method_name,
1548
(intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
1549
}
1550
}
1551
1552
void CodeCache::print_layout(outputStream* st) {
1553
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1554
ResourceMark rm;
1555
print_summary(st, true);
1556
}
1557
1558
void CodeCache::log_state(outputStream* st) {
1559
st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1560
" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1561
blob_count(), nmethod_count(), adapter_count(),
1562
unallocated_capacity());
1563
}
1564
1565
#ifdef LINUX
1566
void CodeCache::write_perf_map() {
1567
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1568
1569
// Perf expects to find the map file at /tmp/perf-<pid>.map.
1570
char fname[32];
1571
jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());
1572
1573
fileStream fs(fname, "w");
1574
if (!fs.is_open()) {
1575
log_warning(codecache)("Failed to create %s for perf map", fname);
1576
return;
1577
}
1578
1579
AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading);
1580
while (iter.next()) {
1581
CodeBlob *cb = iter.method();
1582
ResourceMark rm;
1583
const char* method_name =
1584
cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
1585
: cb->name();
1586
fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
1587
(intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
1588
method_name);
1589
}
1590
}
1591
#endif // LINUX
1592
1593
//---< BEGIN >--- CodeHeap State Analytics.
1594
1595
void CodeCache::aggregate(outputStream *out, size_t granularity) {
1596
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1597
CodeHeapState::aggregate(out, (*heap), granularity);
1598
}
1599
}
1600
1601
void CodeCache::discard(outputStream *out) {
1602
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1603
CodeHeapState::discard(out, (*heap));
1604
}
1605
}
1606
1607
void CodeCache::print_usedSpace(outputStream *out) {
1608
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1609
CodeHeapState::print_usedSpace(out, (*heap));
1610
}
1611
}
1612
1613
void CodeCache::print_freeSpace(outputStream *out) {
1614
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1615
CodeHeapState::print_freeSpace(out, (*heap));
1616
}
1617
}
1618
1619
void CodeCache::print_count(outputStream *out) {
1620
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1621
CodeHeapState::print_count(out, (*heap));
1622
}
1623
}
1624
1625
void CodeCache::print_space(outputStream *out) {
1626
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1627
CodeHeapState::print_space(out, (*heap));
1628
}
1629
}
1630
1631
void CodeCache::print_age(outputStream *out) {
1632
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1633
CodeHeapState::print_age(out, (*heap));
1634
}
1635
}
1636
1637
void CodeCache::print_names(outputStream *out) {
1638
FOR_ALL_ALLOCABLE_HEAPS(heap) {
1639
CodeHeapState::print_names(out, (*heap));
1640
}
1641
}
1642
//---< END >--- CodeHeap State Analytics.