GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/memory/metaspace.cpp
/*
 * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceContext.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/metaspace/metaspaceSettings.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
using metaspace::ChunkManager;
using metaspace::CommitLimiter;
using metaspace::MetaspaceContext;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(Metaspace::MetadataType mdtype) {
  return mdtype == Metaspace::ClassType ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}

// Helper for get_statistics()
static void get_values_for(Metaspace::MetadataType mdtype, size_t* reserved, size_t* committed, size_t* used) {
#define w2b(x) (x * sizeof(MetaWord))
  if (mdtype == Metaspace::ClassType) {
    *reserved = w2b(RunningCounters::reserved_words_class());
    *committed = w2b(RunningCounters::committed_words_class());
    *used = w2b(RunningCounters::used_words_class());
  } else {
    *reserved = w2b(RunningCounters::reserved_words_nonclass());
    *committed = w2b(RunningCounters::committed_words_nonclass());
    *used = w2b(RunningCounters::used_words_nonclass());
  }
#undef w2b
}

// Retrieve all statistics in one go; make sure the values are consistent.
MetaspaceStats MetaspaceUtils::get_statistics(Metaspace::MetadataType mdtype) {

  // Consistency:
  // This function reads three values (reserved, committed, used) from different counters. These counters
  // may (very rarely) be out of sync. This has been a source for intermittent test errors in the past
  //  (see e.g. JDK-8237872, JDK-8151460).
  // - reserved and committed counter are updated under protection of Metaspace_lock; an inconsistency
  //   between them can be the result of a dirty read.
  // - used is an atomic counter updated outside any lock range; there is no way to guarantee
  //   a clean read wrt the other two values.
  // Reading these values under lock protection would only help for the first case. Therefore
  //   we don't bother and just re-read several times, then give up and correct the values.

  size_t r = 0, c = 0, u = 0; // Note: byte values.
  get_values_for(mdtype, &r, &c, &u);
  int retries = 10;
  // If the first retrieval resulted in inconsistent values, retry a bit...
  while ((r < c || c < u) && --retries >= 0) {
    get_values_for(mdtype, &r, &c, &u);
  }
  if (c < u || r < c) { // still inconsistent.
    // ... but not endlessly. If we don't get consistent values, correct them on the fly.
    // The logic here is that we trust the used counter - it's an atomic counter and whatever we see
    // must have been the truth once - and from that we reconstruct a likely set of committed/reserved
    // values.
    metaspace::InternalStats::inc_num_inconsistent_stats();
    if (c < u) {
      c = align_up(u, Metaspace::commit_alignment());
    }
    if (r < c) {
      r = align_up(c, Metaspace::reserve_alignment());
    }
  }
  return MetaspaceStats(r, c, u);
}

MetaspaceCombinedStats MetaspaceUtils::get_combined_statistics() {
  return MetaspaceCombinedStats(get_statistics(Metaspace::ClassType), get_statistics(Metaspace::NonClassType));
}
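
// Example (illustrative sketch, not part of the original file): a GC that wants a
// "used/committed before -> after" line in its log would snapshot the combined statistics
// before collecting and hand that snapshot to print_metaspace_change() afterwards:
//
//   const MetaspaceCombinedStats pre_gc_values = MetaspaceUtils::get_combined_statistics();
//   // ... perform the collection, possibly unloading classes ...
//   MetaspaceUtils::print_metaspace_change(pre_gc_values);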

void MetaspaceUtils::print_metaspace_change(const MetaspaceCombinedStats& pre_meta_values) {
  // Get values now:
  const MetaspaceCombinedStats meta_values = get_combined_statistics();

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  //  unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()),
                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
                                                    pre_meta_values.non_class_used(),
                                                    pre_meta_values.non_class_committed(),
                                                    meta_values.non_class_used(),
                                                    meta_values.non_class_committed()),
                            HEAP_CHANGE_FORMAT_ARGS("Class",
                                                    pre_meta_values.class_used(),
                                                    pre_meta_values.class_committed(),
                                                    meta_values.class_used(),
                                                    meta_values.class_committed()));
  } else {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
                                                    pre_meta_values.used(),
                                                    pre_meta_values.committed(),
                                                    meta_values.used(),
                                                    meta_values.committed()));
  }
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_report(outputStream* out, size_t scale) {
  const int flags =
      (int)MetaspaceReporter::Option::ShowLoaders |
      (int)MetaspaceReporter::Option::BreakDownByChunkType |
      (int)MetaspaceReporter::Option::ShowClasses;
  MetaspaceReporter::print_report(out, scale, flags);
}

void MetaspaceUtils::print_on(outputStream* out) {

  // Used from all GCs. It first prints out totals, then, separately, the class space portion.
  MetaspaceCombinedStats stats = get_combined_statistics();
  out->print_cr(" Metaspace "
                "used " SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved " SIZE_FORMAT "K",
                stats.used()/K,
                stats.committed()/K,
                stats.reserved()/K);

  if (Metaspace::using_class_space()) {
    out->print_cr(" class space "
                  "used " SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved " SIZE_FORMAT "K",
                  stats.class_space_stats().used()/K,
                  stats.class_space_stats().committed()/K,
                  stats.class_space_stats().reserved()/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify() {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify();

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify();

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      cm->verify();

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      vsl->verify();
    }

  }
}
#endif
///////////////////////////////////////////////////////////////////////////////
// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
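
// Worked example (illustrative; assumes the common defaults MinMetaspaceExpansion=256K,
// MaxMetaspaceExpansion=4M and a 64K commit granule - the real values are platform- and
// flag-dependent):
//   - a 100K request aligns up to 128K, which is <= 256K, so the HWM grows by 256K;
//   - a 1M request is <= 4M, so the HWM grows by 4M;
//   - an 8M request exceeds both steps, so the HWM grows by 8M + 256K.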

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = Atomic::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::reserve_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
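
// Illustrative caller sketch (an assumption for clarity, not a copy of the VM's allocation
// path): because the update is a single compare-and-swap on _capacity_until_GC, a caller
// that loses the race simply retries while can_retry remains true:
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(requested_bytes);  // hypothetical caller variable
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry)) {
//     if (!can_retry) {
//       break;  // limit cannot be raised any further (MaxMetaspaceSize reached)
//     }
//   }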

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(&_capacity_until_GC, v);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}
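
// Note on units (numbers below are purely illustrative): the limits are byte values while
// word_size is in MetaWords, hence the word_size * BytesPerWord conversion above. For
// example, with -XX:MaxMetaspaceSize=256M and 255M already committed, a request for
// 256K words (2M on a 64-bit VM) would push the commit charge past the limit and
// can_expand() returns false.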

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);
  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}
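
// Damping, step by step (follows directly from the code above; 1000K of excess capacity is
// an illustrative figure): the first compute_new_size() call that sees the excess shrinks
// 0% (factor 0) and sets the factor to 10; the next call releases 10% of the then-current
// excess (about 100K) and sets the factor to 40; the call after that releases 40% and caps
// the factor at 100; from then on the full excess (minus commit alignment) is released.
// A call that finds no excess leaves _shrink_factor at 0, restarting the sequence.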

////// Metaspace methods /////

const MetaspaceTracer* Metaspace::_tracer = NULL;

bool Metaspace::initialized() {
  return metaspace::MetaspaceContext::context_nonclass() != NULL
         LP64_ONLY(&& (using_class_space() ? Metaspace::class_space_is_initialized() : true));
}

#ifdef _LP64

void Metaspace::print_compressed_class_space(outputStream* st) {
  if (VirtualSpaceList::vslist_class() != NULL) {
    MetaWord* base = VirtualSpaceList::vslist_class()->base_of_first_node();
    size_t size = VirtualSpaceList::vslist_class()->word_size_of_first_node();
    MetaWord* top = base + size;
    st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", reserved size: " SIZE_FORMAT,
              p2i(base), p2i(top), (top - base) * BytesPerWord);
    st->cr();
  }
}

// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");

  assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
         rs.size(), CompressedClassSpaceSize);
  assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
         is_aligned(rs.size(), Metaspace::reserve_alignment()),
         "wrong alignment");

  MetaspaceContext::initialize_class_space_context(rs);

  // This does currently not work because rs may be the result of a split
  // operation and NMT seems not to be able to handle splits.
  // Will be fixed with JDK-8243535.
  // MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);

}

// Returns true if class space has been setup (initialize_class_space).
bool Metaspace::class_space_is_initialized() {
  return MetaspaceContext::context_class() != NULL;
}

// Reserve a range of memory at an address suitable for en/decoding narrow
// Klass pointers (see: CompressedKlassPointers::is_valid_base()).
// The returned address shall both be suitable as a compressed class pointers
//  base, and aligned to Metaspace::reserve_alignment (which is equal to or a
//  multiple of allocation granularity).
// On error, returns an unreserved space.
ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size) {

#if defined(AARCH64) || defined(PPC64)
  const size_t alignment = Metaspace::reserve_alignment();

  // AArch64: Try to align metaspace so that we can decode a compressed
  // klass with a single MOVK instruction. We can do this iff the
  // compressed class base is a multiple of 4G.
  // Additionally, above 32G, ensure the lower LogKlassAlignmentInBytes bits
  // of the upper 32-bits of the address are zero so we can handle a shift
  // when decoding.

  // PPC64: smaller heaps up to 2g will be mapped just below 4g. Then the
  // attempt to place the compressed class space just after the heap fails on
  // Linux 4.1.42 and higher because the launcher is loaded at 4g
  // (ELF_ET_DYN_BASE). In that case we reach here and search the address space
  // below 32g to get a zerobased CCS. For simplicity we reuse the search
  // strategy for AARCH64.

  static const struct {
    address from;
    address to;
    size_t increment;
  } search_ranges[] = {
    {  (address)(4*G),   (address)(32*G),   4*G, },
    { (address)(32*G), (address)(1024*G),  (4 << LogKlassAlignmentInBytes) * G },
    { NULL, NULL, 0 }
  };

  for (int i = 0; search_ranges[i].from != NULL; i ++) {
    address a = search_ranges[i].from;
    assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
    while (a < search_ranges[i].to) {
      ReservedSpace rs(size, Metaspace::reserve_alignment(),
                       os::vm_page_size(), (char*)a);
      if (rs.is_reserved()) {
        assert(a == (address)rs.base(), "Sanity");
        return rs;
      }
      a += search_ranges[i].increment;
    }
  }
#endif // defined(AARCH64) || defined(PPC64)

#ifdef AARCH64
  // Note: on AARCH64, if the code above does not find any good placement, we
  // have no recourse. We return an empty space and the VM will exit.
  return ReservedSpace();
#else
  // Default implementation: Just reserve anywhere.
  return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)NULL);
#endif // AARCH64
}

#endif // _LP64

size_t Metaspace::reserve_alignment_words() {
  return metaspace::Settings::virtual_space_node_reserve_alignment_words();
}

size_t Metaspace::commit_alignment_words() {
  return metaspace::Settings::commit_granule_words();
}

void Metaspace::ergo_initialize() {

  // Must happen before using any setting from Settings::---
  metaspace::Settings::ergo_initialize();

  // MaxMetaspaceSize and CompressedClassSpaceSize:
  //
  // MaxMetaspaceSize is the maximum size, in bytes, of memory we are allowed
  //  to commit for the Metaspace.
  //  It is just a number; a limit we compare against before committing. It
  //  does not have to be aligned to anything.
  //  It gets used as compare value before attempting to increase the metaspace
  //  commit charge. It defaults to max_uintx (unlimited).
  //
  // CompressedClassSpaceSize is the size, in bytes, of the address range we
  //  pre-reserve for the compressed class space (if we use class space).
  //  This size has to be aligned to the metaspace reserve alignment (to the
  //  size of a root chunk). It gets aligned up from whatever value the caller
  //  gave us to the next multiple of root chunk size.
  //
  // Note: Strictly speaking MaxMetaspaceSize and CompressedClassSpaceSize have
  //  very little to do with each other. The notion often encountered:
  //  MaxMetaspaceSize = CompressedClassSpaceSize + <non-class metadata size>
  //  is subtly wrong: MaxMetaspaceSize can be smaller than CompressedClassSpaceSize,
  //  in which case we just would not be able to fully commit the class space range.
  //
  // We still adjust CompressedClassSpaceSize to reasonable limits, mainly to
  //  save on reserved space, and to make ergonomics less confusing.

  MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment());

  if (UseCompressedClassPointers) {
    // Let CCS size not be larger than 80% of MaxMetaspaceSize. Note that this is
    // grossly over-dimensioned for most usage scenarios; typical ratio of
    // class space : non class space usage is about 1:6. With many small classes,
    // it can get as low as 1:2. It is not a big deal though since ccs is only
    // reserved and will be committed on demand only.
    size_t max_ccs_size = MaxMetaspaceSize * 0.8;
    size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size);

    // CCS must be aligned to root chunk size, and be at least the size of one
    //  root chunk.
    adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment());
    adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment());

    // Note: re-adjusting may have us left with a CompressedClassSpaceSize
    //  larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize.
    //  Let's just live with that; it's not a big deal.

    if (adjusted_ccs_size != CompressedClassSpaceSize) {
      FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size);
      log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".",
                          CompressedClassSpaceSize);
    }
  }

  // Set MetaspaceSize, MinMetaspaceExpansion and MaxMetaspaceExpansion
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, commit_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, commit_alignment());
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, commit_alignment());

}
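
// Worked example (illustrative; assumes the usual 1G default for CompressedClassSpaceSize and
// a root-chunk-sized reserve alignment of 4M - both depend on build and flags): with
// -XX:MaxMetaspaceSize=100M, max_ccs_size is 80M, so adjusted_ccs_size becomes
// MIN2(1G, 80M) = 80M; that is already a multiple of 4M and at least one root chunk,
// so CompressedClassSpaceSize is set ergonomically to 80M.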

void Metaspace::global_initialize() {
  MetaspaceGC::initialize(); // <- since we do not prealloc init chunks anymore is this still needed?

  metaspace::ChunkHeaderPool::initialize();

  if (DumpSharedSpaces) {
    assert(!UseSharedSpaces, "sanity");
    MetaspaceShared::initialize_for_static_dump();
  }

  // If UseCompressedClassPointers=1, we have two cases:
  // a) if CDS is active (runtime, Xshare=on), it will create the class space
  //    for us, initialize it and set up CompressedKlassPointers encoding.
  //    Class space will be reserved above the mapped archives.
  // b) if CDS is either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
  //    we will create the class space on our own. It will be placed above the java heap,
  //    since we assume it has been placed in low
  //    address regions. We may rethink this (see JDK-8244943). Failing that,
  //    it will be placed anywhere.

#if INCLUDE_CDS
  // case (a)
  if (UseSharedSpaces) {
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false.
  }

  if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
    vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
  }
#endif // INCLUDE_CDS

#ifdef _LP64

  if (using_class_space() && !class_space_is_initialized()) {
    assert(!UseSharedSpaces, "CDS archive is not mapped at this point");

    // case (b)
    ReservedSpace rs;

    // If UseCompressedOops=1 and the java heap has been placed in coops-friendly
    //  territory, i.e. its base is under 32G, then we attempt to place ccs
    //  right above the java heap.
    // Otherwise the lower 32G are still free. We try to place ccs at the lowest
    //  allowed mapping address.
    address base = (UseCompressedOops && (uint64_t)CompressedOops::base() < OopEncodingHeapMax) ?
                   CompressedOops::end() : (address)HeapBaseMinAddress;
    base = align_up(base, Metaspace::reserve_alignment());

    const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());
    if (base != NULL) {
      if (CompressedKlassPointers::is_valid_base(base)) {
        rs = ReservedSpace(size, Metaspace::reserve_alignment(),
                           os::vm_page_size(), (char*)base);
      }
    }

    // ...failing that, reserve anywhere, but let platform do optimized placement:
    if (!rs.is_reserved()) {
      rs = Metaspace::reserve_address_space_for_compressed_classes(size);
    }

    // ...failing that, give up.
    if (!rs.is_reserved()) {
      vm_exit_during_initialization(
          err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
                  CompressedClassSpaceSize));
    }

    // Initialize space
    Metaspace::initialize_class_space(rs);

    // Set up compressed class pointer encoding.
    CompressedKlassPointers::initialize((address)rs.base(), rs.size());
  }

#endif

  // Initialize non-class virtual space list, and its chunk manager:
  MetaspaceContext::initialize_nonclass_space_context();

  _tracer = new MetaspaceTracer();

  // We must prevent the very first address of the ccs from being used to store
  // metadata, since that address would translate to a narrow pointer of 0, and the
  // VM does not distinguish between "narrow 0 as in NULL" and "narrow 0 as in start
  //  of ccs".
  // Before Elastic Metaspace that did not happen due to the fact that every Metachunk
  // had a header and therefore could not allocate anything at offset 0.
#ifdef _LP64
  if (using_class_space()) {
    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
    // start of ccs and do not use it for anything.
    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
  }
#endif

#ifdef _LP64
  if (UseCompressedClassPointers) {
    // Note: "cds" would be a better fit but keep this for backward compatibility.
    LogTarget(Info, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      CDS_ONLY(MetaspaceShared::print_on(&ls);)
      Metaspace::print_compressed_class_space(&ls);
      CompressedKlassPointers::print_mode(&ls);
    }
  }
#endif

}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

size_t Metaspace::max_allocation_word_size() {
  const size_t max_overhead_words = metaspace::get_raw_word_size_for_requested_word_size(1);
  return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE - max_overhead_words;
}

// This version of Metaspace::allocate does not throw OOM but simply returns NULL, and
// is suitable for calling from non-Java threads.
// Callers are responsible for checking null.
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type) {
  assert(word_size <= Metaspace::max_allocation_word_size(),
         "allocation size too large (" SIZE_FORMAT ")", word_size);

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result != NULL) {
    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}
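
// Illustrative caller sketch (an assumption for clarity - cld and the chosen type are
// hypothetical, this is not VM code): since this overload never throws, a non-Java thread
// has to check the result itself:
//
//   MetaWord* p = Metaspace::allocate(cld, word_size, MetaspaceObj::SymbolType);
//   if (p == NULL) {
//     // handle the failure locally, e.g. fall back or defer the allocation
//   }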

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  MetaWord* result = allocate(loader_data, word_size, type);

  if (result == NULL) {
    MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }

    if (result == NULL) {
      report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
      assert(HAS_PENDING_EXCEPTION, "sanity");
      return NULL;
    }

    // Zero initialize.
    Copy::fill_to_words((HeapWord*)result, word_size, 0);

    log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result));
  }

  return result;
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      align_up(word_size * BytesPerWord, 4 * M) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

void Metaspace::purge() {
  ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
  if (cm != NULL) {
    cm->purge();
  }
  if (using_class_space()) {
    cm = ChunkManager::chunkmanager_class();
    if (cm != NULL) {
      cm->purge();
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
    return true;
  }

  return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}