Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/memory/metaspace/chunkManager.cpp
40957 views
1
/*
2
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
9
*
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
15
*
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
*
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
* or visit www.oracle.com if you need additional information or have any
22
* questions.
23
*
24
*/
25
26
#include "precompiled.hpp"
27
#include "logging/log.hpp"
28
#include "logging/logStream.hpp"
29
#include "memory/metaspace/chunkManager.hpp"
30
#include "memory/metaspace/internalStats.hpp"
31
#include "memory/metaspace/metachunk.hpp"
32
#include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp"
33
#include "memory/metaspace/metaspaceCommon.hpp"
34
#include "memory/metaspace/metaspaceContext.hpp"
35
#include "memory/metaspace/metaspaceSettings.hpp"
36
#include "memory/metaspace/metaspaceStatistics.hpp"
37
#include "memory/metaspace/virtualSpaceList.hpp"
38
#include "memory/metaspace/virtualSpaceNode.hpp"
39
#include "runtime/mutexLocker.hpp"
40
#include "utilities/debug.hpp"
41
#include "utilities/globalDefinitions.hpp"
42
43
namespace metaspace {
44
45
#define LOGFMT "ChkMgr @" PTR_FORMAT " (%s)"
46
#define LOGFMT_ARGS p2i(this), this->_name
47
48
// Return a single chunk to the freelist and adjust accounting. No merge is attempted.
49
void ChunkManager::return_chunk_simple_locked(Metachunk* c) {
50
assert_lock_strong(Metaspace_lock);
51
DEBUG_ONLY(c->verify());
52
_chunks.add(c);
53
c->reset_used_words();
54
// Tracing
55
log_debug(metaspace)("ChunkManager %s: returned chunk " METACHUNK_FORMAT ".",
56
_name, METACHUNK_FORMAT_ARGS(c));
57
}
58
59
// Creates a chunk manager with a given name (which is for debug purposes only)
60
// and an associated space list which will be used to request new chunks from
61
// (see get_chunk())
62
ChunkManager::ChunkManager(const char* name, VirtualSpaceList* space_list) :
63
_vslist(space_list),
64
_name(name),
65
_chunks()
66
{
67
}
68
69
// Given a chunk, split it into a target chunk of a smaller size (higher target level)
70
// and at least one, possible several splinter chunks.
71
// The original chunk must be outside of the freelist and its state must be free.
72
// The splinter chunks are added to the freelist.
73
// The resulting target chunk will be located at the same address as the original
74
// chunk, but it will of course be smaller (of a higher level).
75
// The committed areas within the original chunk carry over to the resulting
76
// chunks.
77
void ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chunklevel_t target_level) {
78
assert_lock_strong(Metaspace_lock);
79
assert(c->is_free(), "chunk to be split must be free.");
80
assert(c->level() < target_level, "Target level must be higher than current level.");
81
assert(c->prev() == NULL && c->next() == NULL, "Chunk must be outside of any list.");
82
83
DEBUG_ONLY(chunklevel::check_valid_level(target_level);)
84
DEBUG_ONLY(c->verify();)
85
86
UL2(debug, "splitting chunk " METACHUNK_FORMAT " to " CHKLVL_FORMAT ".",
87
METACHUNK_FORMAT_ARGS(c), target_level);
88
89
DEBUG_ONLY(size_t committed_words_before = c->committed_words();)
90
91
const chunklevel_t orig_level = c->level();
92
c->vsnode()->split(target_level, c, &_chunks);
93
94
// Splitting should never fail.
95
assert(c->level() == target_level, "Sanity");
96
97
// The size of the committed portion should not change (subject to the reduced chunk size of course)
98
#ifdef ASSERT
99
if (committed_words_before > c->word_size()) {
100
assert(c->is_fully_committed(), "Sanity");
101
} else {
102
assert(c->committed_words() == committed_words_before, "Sanity");
103
}
104
c->verify();
105
verify_locked();
106
SOMETIMES(c->vsnode()->verify_locked();)
107
#endif
108
InternalStats::inc_num_chunk_splits();
109
}
110
111
// On success, returns a chunk of level of <preferred_level>, but at most <max_level>.
112
// The first first <min_committed_words> of the chunk are guaranteed to be committed.
113
// On error, will return NULL.
114
//
115
// This function may fail for two reasons:
116
// - Either we are unable to reserve space for a new chunk (if the underlying VirtualSpaceList
117
// is non-expandable but needs expanding - aka out of compressed class space).
118
// - Or, if the necessary space cannot be committed because we hit a commit limit.
119
// This may be either the GC threshold or MaxMetaspaceSize.
120
Metachunk* ChunkManager::get_chunk(chunklevel_t preferred_level, chunklevel_t max_level, size_t min_committed_words) {
121
assert(preferred_level <= max_level, "Sanity");
122
assert(chunklevel::level_fitting_word_size(min_committed_words) >= max_level, "Sanity");
123
124
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
125
126
DEBUG_ONLY(verify_locked();)
127
DEBUG_ONLY(chunklevel::check_valid_level(max_level);)
128
DEBUG_ONLY(chunklevel::check_valid_level(preferred_level);)
129
130
UL2(debug, "requested chunk: pref_level: " CHKLVL_FORMAT
131
", max_level: " CHKLVL_FORMAT ", min committed size: " SIZE_FORMAT ".",
132
preferred_level, max_level, min_committed_words);
133
134
// First, optimistically look for a chunk which is already committed far enough to hold min_word_size.
135
136
// 1) Search best or smaller committed chunks (first attempt):
137
// Start at the preferred chunk size and work your way down (level up).
138
// But for now, only consider chunks larger than a certain threshold -
139
// this is to prevent large loaders (eg boot) from unnecessarily gobbling up
140
// all the tiny splinter chunks lambdas leave around.
141
Metachunk* c = NULL;
142
c = _chunks.search_chunk_ascending(preferred_level, MIN2((chunklevel_t)(preferred_level + 2), max_level), min_committed_words);
143
144
// 2) Search larger committed chunks:
145
// If that did not yield anything, look at larger chunks, which may be committed. We would have to split
146
// them first, of course.
147
if (c == NULL) {
148
c = _chunks.search_chunk_descending(preferred_level, min_committed_words);
149
}
150
// 3) Search best or smaller committed chunks (second attempt):
151
// Repeat (1) but now consider even the tiniest chunks as long as they are large enough to hold the
152
// committed min size.
153
if (c == NULL) {
154
c = _chunks.search_chunk_ascending(preferred_level, max_level, min_committed_words);
155
}
156
// if we did not get anything yet, there are no free chunks commmitted enough. Repeat search but look for uncommitted chunks too:
157
// 4) Search best or smaller chunks, can be uncommitted:
158
if (c == NULL) {
159
c = _chunks.search_chunk_ascending(preferred_level, max_level, 0);
160
}
161
// 5) Search a larger uncommitted chunk:
162
if (c == NULL) {
163
c = _chunks.search_chunk_descending(preferred_level, 0);
164
}
165
166
if (c != NULL) {
167
UL(trace, "taken from freelist.");
168
}
169
170
// Failing all that, allocate a new root chunk from the connected virtual space.
171
// This may fail if the underlying vslist cannot be expanded (e.g. compressed class space)
172
if (c == NULL) {
173
c = _vslist->allocate_root_chunk();
174
if (c == NULL) {
175
UL(info, "failed to get new root chunk.");
176
} else {
177
assert(c->level() == chunklevel::ROOT_CHUNK_LEVEL, "root chunk expected");
178
UL(debug, "allocated new root chunk.");
179
}
180
}
181
if (c == NULL) {
182
// If we end up here, we found no match in the freelists and were unable to get a new
183
// root chunk (so we used up all address space, e.g. out of CompressedClassSpace).
184
UL2(info, "failed to get chunk (preferred level: " CHKLVL_FORMAT
185
", max level " CHKLVL_FORMAT ".", preferred_level, max_level);
186
c = NULL;
187
}
188
if (c != NULL) {
189
// Now we have a chunk.
190
// It may be larger than what the caller wanted, so we may want to split it. This should
191
// always work.
192
if (c->level() < preferred_level) {
193
split_chunk_and_add_splinters(c, preferred_level);
194
assert(c->level() == preferred_level, "split failed?");
195
}
196
// Attempt to commit the chunk (depending on settings, we either fully commit it or just
197
// commit enough to get the caller going). That may fail if we hit a commit limit. In
198
// that case put the chunk back to the freelist (re-merging it with its neighbors if we
199
// did split it) and return NULL.
200
const size_t to_commit = Settings::new_chunks_are_fully_committed() ? c->word_size() : min_committed_words;
201
if (c->committed_words() < to_commit) {
202
if (c->ensure_committed_locked(to_commit) == false) {
203
UL2(info, "failed to commit " SIZE_FORMAT " words on chunk " METACHUNK_FORMAT ".",
204
to_commit, METACHUNK_FORMAT_ARGS(c));
205
return_chunk_locked(c);
206
c = NULL;
207
}
208
}
209
if (c != NULL) {
210
// Still here? We have now a good chunk, all is well.
211
assert(c->committed_words() >= min_committed_words, "Sanity");
212
213
// Any chunk returned from ChunkManager shall be marked as in use.
214
c->set_in_use();
215
216
UL2(debug, "handing out chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
217
218
InternalStats::inc_num_chunks_taken_from_freelist();
219
220
SOMETIMES(c->vsnode()->verify_locked();)
221
}
222
}
223
224
DEBUG_ONLY(verify_locked();)
225
return c;
226
}
227
228
// Return a single chunk to the ChunkManager and adjust accounting. May merge chunk
229
// with neighbors.
230
// As a side effect this removes the chunk from whatever list it has been in previously.
231
// Happens after a Classloader was unloaded and releases its metaspace chunks.
232
// !! Note: this may invalidate the chunk. Do not access the chunk after
233
// this function returns !!
234
void ChunkManager::return_chunk(Metachunk* c) {
235
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
236
return_chunk_locked(c);
237
}
238
239
// See return_chunk().
240
void ChunkManager::return_chunk_locked(Metachunk* c) {
241
assert_lock_strong(Metaspace_lock);
242
UL2(debug, ": returning chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
243
DEBUG_ONLY(c->verify();)
244
assert(contains_chunk(c) == false, "A chunk to be added to the freelist must not be in the freelist already.");
245
assert(c->is_in_use() || c->is_free(), "Unexpected chunk state");
246
assert(!c->in_list(), "Remove from list first");
247
248
c->set_free();
249
c->reset_used_words();
250
const chunklevel_t orig_lvl = c->level();
251
252
Metachunk* merged = NULL;
253
if (!c->is_root_chunk()) {
254
// Only attempt merging if we are not of the lowest level already.
255
merged = c->vsnode()->merge(c, &_chunks);
256
}
257
258
if (merged != NULL) {
259
InternalStats::inc_num_chunk_merges();
260
DEBUG_ONLY(merged->verify());
261
// We did merge chunks and now have a bigger chunk.
262
assert(merged->level() < orig_lvl, "Sanity");
263
UL2(debug, "merged into chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(merged));
264
c = merged;
265
}
266
267
if (Settings::uncommit_free_chunks() &&
268
c->word_size() >= Settings::commit_granule_words()) {
269
UL2(debug, "uncommitting free chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
270
c->uncommit_locked();
271
}
272
273
return_chunk_simple_locked(c);
274
DEBUG_ONLY(verify_locked();)
275
SOMETIMES(c->vsnode()->verify_locked();)
276
InternalStats::inc_num_chunks_returned_to_freelist();
277
}
278
279
// Given a chunk c, whose state must be "in-use" and must not be a root chunk, attempt to
280
// enlarge it in place by claiming its trailing buddy.
281
//
282
// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
283
//
284
// If successful, the follower chunk will be removed from the freelists, the leader chunk c will
285
// double in size (level decreased by one).
286
//
287
// On success, true is returned, false otherwise.
288
bool ChunkManager::attempt_enlarge_chunk(Metachunk* c) {
289
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
290
return c->vsnode()->attempt_enlarge_chunk(c, &_chunks);
291
}
292
293
// Helper: prints "old->new (+/-delta)" in human-readable scaled words, or
// "old (no change)" if both sizes are equal.
static void print_word_size_delta(outputStream* st, size_t word_size_1, size_t word_size_2) {
  if (word_size_1 == word_size_2) {
    print_scaled_words(st, word_size_1);
    st->print (" (no change)");
    return;
  }
  print_scaled_words(st, word_size_1);
  st->print("->");
  print_scaled_words(st, word_size_2);
  st->print(" (");
  if (word_size_2 <= word_size_1) {
    st->print("-");
    print_scaled_words(st, word_size_1 - word_size_2);
  } else {
    st->print("+");
    print_scaled_words(st, word_size_2 - word_size_1);
  }
  st->print(")");
}
312
313
void ChunkManager::purge() {
314
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
315
UL(info, ": reclaiming memory...");
316
317
const size_t reserved_before = _vslist->reserved_words();
318
const size_t committed_before = _vslist->committed_words();
319
int num_nodes_purged = 0;
320
321
// We purge to return unused memory to the Operating System. We do this in
322
// two independent steps.
323
324
// 1) We purge the virtual space list: any memory mappings which are
325
// completely deserted can be potentially unmapped. We iterate over the list
326
// of mappings (VirtualSpaceList::purge) and delete every node whose memory
327
// only contains free chunks. Deleting that node includes unmapping its memory,
328
// so all chunk vanish automatically.
329
// Of course we need to remove the chunk headers of those vanished chunks from
330
// the ChunkManager freelist.
331
num_nodes_purged = _vslist->purge(&_chunks);
332
InternalStats::inc_num_purges();
333
334
// 2) Since (1) is rather ineffective - it is rare that a whole node only contains
335
// free chunks - we now iterate over all remaining free chunks and
336
// and uncommit those which can be uncommitted (>= commit granule size).
337
if (Settings::uncommit_free_chunks()) {
338
const chunklevel_t max_level =
339
chunklevel::level_fitting_word_size(Settings::commit_granule_words());
340
for (chunklevel_t l = chunklevel::LOWEST_CHUNK_LEVEL;
341
l <= max_level;
342
l++) {
343
// Since we uncommit all chunks at this level, we do not break the "committed chunks are
344
// at the front of the list" condition.
345
for (Metachunk* c = _chunks.first_at_level(l); c != NULL; c = c->next()) {
346
c->uncommit_locked();
347
}
348
}
349
}
350
351
const size_t reserved_after = _vslist->reserved_words();
352
const size_t committed_after = _vslist->committed_words();
353
354
// Print a nice report.
355
if (reserved_after == reserved_before && committed_after == committed_before) {
356
UL(info, "nothing reclaimed.");
357
} else {
358
LogTarget(Info, metaspace) lt;
359
if (lt.is_enabled()) {
360
LogStream ls(lt);
361
ls.print_cr(LOGFMT ": finished reclaiming memory: ", LOGFMT_ARGS);
362
ls.print("reserved: ");
363
print_word_size_delta(&ls, reserved_before, reserved_after);
364
ls.cr();
365
ls.print("committed: ");
366
print_word_size_delta(&ls, committed_before, committed_after);
367
ls.cr();
368
ls.print_cr("full nodes purged: %d", num_nodes_purged);
369
}
370
}
371
DEBUG_ONLY(_vslist->verify_locked());
372
DEBUG_ONLY(verify_locked());
373
}
374
375
// Convenience methods to return the global class-space chunkmanager
376
// and non-class chunkmanager, respectively.
377
ChunkManager* ChunkManager::chunkmanager_class() {
378
return MetaspaceContext::context_class() == NULL ? NULL : MetaspaceContext::context_class()->cm();
379
}
380
381
// Returns the global non-class chunkmanager, or NULL if no non-class
// metaspace context exists yet.
ChunkManager* ChunkManager::chunkmanager_nonclass() {
  MetaspaceContext* const ctx = MetaspaceContext::context_nonclass();
  return (ctx == NULL) ? NULL : ctx->cm();
}
384
385
// Calculates the total number of committed words over all chunks. Walks chunks.
386
size_t ChunkManager::calc_committed_word_size() const {
387
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
388
return calc_committed_word_size_locked();
389
}
390
391
// As calc_committed_word_size(), but expects the caller to hold the
// Metaspace lock already.
size_t ChunkManager::calc_committed_word_size_locked() const {
  assert_lock_strong(Metaspace_lock);
  return _chunks.calc_committed_word_size();
}
395
396
// Update statistics.
397
void ChunkManager::add_to_statistics(ChunkManagerStats* out) const {
398
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
399
for (chunklevel_t l = chunklevel::ROOT_CHUNK_LEVEL; l <= chunklevel::HIGHEST_CHUNK_LEVEL; l++) {
400
out->_num_chunks[l] += _chunks.num_chunks_at_level(l);
401
out->_committed_word_size[l] += _chunks.calc_committed_word_size_at_level(l);
402
}
403
DEBUG_ONLY(out->verify();)
404
}
405
406
#ifdef ASSERT

// Debug-build verification: lock-acquiring wrapper.
void ChunkManager::verify() const {
  MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
  verify_locked();
}

// Debug-build verification; caller must hold the Metaspace lock.
void ChunkManager::verify_locked() const {
  assert_lock_strong(Metaspace_lock);
  assert(_vslist != NULL, "No vslist");
  _chunks.verify();
}

// True if the given chunk is currently in our freelist (debug only).
bool ChunkManager::contains_chunk(Metachunk* c) const {
  return _chunks.contains(c);
}

#endif // ASSERT
424
425
// Prints this chunk manager's state; lock-acquiring wrapper around
// print_on_locked().
void ChunkManager::print_on(outputStream* st) const {
  MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
  print_on_locked(st);
}
429
430
// Prints a one-line summary followed by the freelist details. Caller must
// hold the Metaspace lock.
void ChunkManager::print_on_locked(outputStream* st) const {
  assert_lock_strong(Metaspace_lock);
  st->print_cr("cm %s: %d chunks, total word size: " SIZE_FORMAT ".", _name,
               total_num_chunks(), total_word_size());
  _chunks.print_on(st);
}
436
437
} // namespace metaspace
438
439