GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/g1/g1Allocator.cpp
/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "utilities/align.hpp"

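// Note: G1Allocator keeps one mutator allocation region and one survivor GC
// allocation region per active NUMA node; a single old GC allocation region
// (plus an optional retained one) is shared by all nodes.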
G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _numa(heap->numa()),
  _survivor_is_full(false),
  _old_is_full(false),
  _num_alloc_regions(_numa->num_active_nodes()),
  _mutator_alloc_regions(NULL),
  _survivor_gc_alloc_regions(NULL),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {

  _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
  _survivor_gc_alloc_regions = NEW_C_HEAP_ARRAY(SurvivorGCAllocRegion, _num_alloc_regions, mtGC);
  G1EvacStats* stat = heap->alloc_buffer_stats(G1HeapRegionAttr::Young);

  for (uint i = 0; i < _num_alloc_regions; i++) {
    ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
    ::new(_survivor_gc_alloc_regions + i) SurvivorGCAllocRegion(stat, i);
  }
}

G1Allocator::~G1Allocator() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    _mutator_alloc_regions[i].~MutatorAllocRegion();
    _survivor_gc_alloc_regions[i].~SurvivorGCAllocRegion();
  }
  FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
  FREE_C_HEAP_ARRAY(SurvivorGCAllocRegion, _survivor_gc_alloc_regions);
}

#ifdef ASSERT
bool G1Allocator::has_mutator_alloc_region() {
  uint node_index = current_node_index();
  return mutator_alloc_region(node_index)->get() != NULL;
}
#endif

void G1Allocator::init_mutator_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
    mutator_alloc_region(i)->init();
  }
}

void G1Allocator::release_mutator_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    mutator_alloc_region(i)->release();
    assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
  }
}

bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
  return _retained_old_gc_alloc_region == hr;
}

void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  for (uint i = 0; i < _num_alloc_regions; i++) {
    survivor_gc_alloc_region(i)->init();
  }

  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  uint survivor_region_count = 0;
  for (uint node_index = 0; node_index < _num_alloc_regions; node_index++) {
    survivor_region_count += survivor_gc_alloc_region(node_index)->count();
    survivor_gc_alloc_region(node_index)->release();
  }
  evacuation_info.set_allocation_regions(survivor_region_count +
                                         old_gc_alloc_region()->count());

  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way so no reason to check explicitly for either
  // condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}

void G1Allocator::abandon_gc_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    assert(survivor_gc_alloc_region(i)->get() == NULL, "pre-condition");
  }
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1Allocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full() const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full() {
  _old_is_full = true;
}

size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the current alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  uint node_index = current_node_index();
  HeapRegion* hr = mutator_alloc_region(node_index)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return clamp(hr->free(), MinTLABSize, max_tlab);
  }
}

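// Sums the bytes used by the per-node mutator allocation regions. The
// Heap_lock must be held (possibly on this thread's behalf), as asserted below.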
size_t G1Allocator::used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  size_t used = 0;
  for (uint i = 0; i < _num_alloc_regions; i++) {
    used += mutator_alloc_region(i)->used_in_alloc_regions();
  }
  return used;
}

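// Fixed-size variant: requests exactly word_size words from the given
// destination and asserts that no differently sized block was returned.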
HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t word_size,
                                              uint node_index) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              uint node_index) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, node_index);
    case G1HeapRegionAttr::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

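// First try to allocate in the current survivor GC alloc region for this node
// without taking the FreeList_lock; only if that fails (and survivor space is
// not yet marked full), retry under the lock so a new region can be obtained.
// Marks survivor space as full when the locked attempt also fails.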
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   uint node_index) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(node_index)->attempt_allocation(min_word_size,
                                                                              desired_word_size,
                                                                              actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
                                                                             desired_word_size,
                                                                             actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}

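// For each destination type, G1PLABAllocator allocates alloc_buffers_length()
// PLABs (one per eligible NUMA node) and tracks the number of words allocated
// directly, bypassing the PLABs.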
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator) {
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    _direct_allocated[state] = 0;
    uint length = alloc_buffers_length(state);
    _alloc_buffers[state] = NEW_C_HEAP_ARRAY(PLAB*, length, mtGC);
    for (uint node_index = 0; node_index < length; node_index++) {
      _alloc_buffers[state][node_index] = new PLAB(_g1h->desired_plab_sz(state));
    }
  }
}

G1PLABAllocator::~G1PLABAllocator() {
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    uint length = alloc_buffers_length(state);
    for (uint node_index = 0; node_index < length; node_index++) {
      delete _alloc_buffers[state][node_index];
    }
    FREE_C_HEAP_ARRAY(PLAB*, _alloc_buffers[state]);
  }
}

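// A buffer may be thrown away (retired early) only if the allocation that did
// not fit is small relative to a full buffer, i.e. less than
// ParallelGCBufferWastePct percent of buffer_size. For example, with
// ParallelGCBufferWastePct == 10 and a 4096-word buffer, only requests of
// fewer than 410 words allow the buffer to be replaced; larger requests are
// allocated directly instead (see allocate_direct_or_new_plab()).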
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

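// Called when an allocation did not fit into the current PLAB: either retires
// the current buffer and installs a freshly allocated one (if the request is
// small enough, see may_throw_away_buffer()), or falls back to allocating the
// object directly in the destination space.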
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed,
                                                       uint node_index) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest, node_index);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       node_index);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
             word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index);
  if (result != NULL) {
    _direct_allocated[dest.type()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index) {
  alloc_buffer(dest, node_index)->undo_allocation(obj, word_sz);
}

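// Fold the statistics of all PLABs into the shared per-destination
// G1EvacStats, add the words that were allocated directly, and reset the
// direct-allocation counters.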
void G1PLABAllocator::flush_and_retire_stats() {
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
      PLAB* const buf = alloc_buffer(state, node_index);
      if (buf != NULL) {
        buf->flush_and_retire_stats(stats);
      }
    }
    stats->add_direct_allocated(_direct_allocated[state]);
    _direct_allocated[state] = 0;
  }
}

size_t G1PLABAllocator::waste() const {
  size_t result = 0;
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
      PLAB* const buf = alloc_buffer(state, node_index);
      if (buf != NULL) {
        result += buf->waste();
      }
    }
  }
  return result;
}

size_t G1PLABAllocator::undo_waste() const {
  size_t result = 0;
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
      PLAB* const buf = alloc_buffer(state, node_index);
      if (buf != NULL) {
        result += buf->undo_waste();
      }
    }
  }
  return result;
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  return new G1ArchiveAllocator(g1h, open);
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the archive set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->archive_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

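// Satisfy an archive allocation of word_size words from the current
// min_region_size'd chunk, retiring the chunk (and, when the region is
// exhausted, allocating a new region) and filling any unusable leftover space
// with dummy objects. Returns NULL if no further region can be obtained.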
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Try to allocate word_size in the current allocation chunk. Two cases
  // require special treatment:
  // 1. not enough space for word_size
  // 2. after allocating word_size, there's non-zero space left, but too small for the minimal filler
  // In both cases, we retire the current chunk and move on to the next one.
  size_t free_words = pointer_delta(_max, old_top);
  if (free_words < word_size ||
      ((free_words - word_size != 0) && (free_words - word_size < CollectedHeap::min_fill_size()))) {
    // Retiring the current chunk
    if (old_top != _max) {
      // Non-zero space; need to insert the filler
      size_t fill_size = free_words;
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    // Set the current chunk as "full"
    _allocation_region->set_top(_max);

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_max != _allocation_region->end()) {
      // Shift to the next chunk
      old_top = _bottom = _max;
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  assert(pointer_delta(_max, old_top) >= word_size, "enough space left");
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

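// Finish the archive allocation: pad the final region to the requested end
// alignment (if any), then report the allocated address ranges, merging
// contiguous regions, into the caller-supplied GrowableArray.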
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}