Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/g1/g1AllocRegion.cpp
40957 views
1
/*
2
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "gc/g1/g1AllocRegion.inline.hpp"
27
#include "gc/g1/g1EvacStats.inline.hpp"
28
#include "gc/g1/g1CollectedHeap.inline.hpp"
29
#include "gc/shared/tlab_globals.hpp"
30
#include "logging/log.hpp"
31
#include "logging/logStream.hpp"
32
#include "memory/resourceArea.hpp"
33
#include "runtime/orderAccess.hpp"
34
#include "utilities/align.hpp"
35
36
// Shared by all G1AllocRegion instances; both are set exactly once in setup().
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
// Sentinel region installed in _alloc_region while no real region is active.
// It is completely full, so every allocation attempt against it fails (see
// the asserts in setup()).
HeapRegion* G1AllocRegion::_dummy_region = NULL;
38
39
// One-time static initialization: record the heap and the shared dummy
// region that acts as the "no active region" sentinel. The dummy region
// must be full so allocation attempts against it always fail.
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(dummy_region->allocate_no_bot_updates(1) == NULL, "should fail");
  assert(dummy_region->allocate(1) == NULL, "should fail");
  DEBUG_ONLY(size_t assert_tmp);
  assert(dummy_region->par_allocate_no_bot_updates(1, 1, &assert_tmp) == NULL, "should fail");
  assert(dummy_region->par_allocate(1, 1, &assert_tmp) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}
55
56
// Plug the remaining free space of alloc_region with a dummy object so no
// other thread can allocate out of it any more, and return the number of
// bytes wasted in the process (the filled space plus any leftover chunk
// too small to ever be allocated).
size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");
  size_t result = 0;

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that noone else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // closed to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then noone can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      result += free_word_size * HeapWordSize;
      break;
    }

    // Our CAS lost the race; re-read the (now smaller) free space and retry.
    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
  // Any free space still left is below min_fill_size() and therefore can
  // never be handed out — count it as waste as well.
  result += alloc_region->free();

  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
  return result;
}
98
99
// Retire alloc_region: optionally fill its remaining free space, then hand
// it back to the subclass via retire_region() together with the number of
// bytes that were allocated in it while it was active. Returns the wasted
// (filled) bytes, 0 when fill_up is false.
size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
  // We never have to check whether the active region is empty or not,
  // and potentially free it if it is, given that it's guaranteed that
  // it will never be empty.
  size_t waste = 0;
  assert_alloc_region(!alloc_region->is_empty(),
                      "the alloc region should never be empty");

  if (fill_up) {
    waste = fill_up_remaining_space(alloc_region);
  }

  // _used_bytes_before was snapshotted when the region became active, so
  // the difference is exactly what was allocated through this allocator.
  assert_alloc_region(alloc_region->used() >= _used_bytes_before, "invariant");
  size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
  retire_region(alloc_region, allocated_bytes);
  _used_bytes_before = 0;

  return waste;
}
118
119
// Retire the currently active region, if any. When only the dummy region
// is installed there is nothing to do. Returns the number of bytes wasted
// while closing the region (0 if nothing was retired).
size_t G1AllocRegion::retire(bool fill_up) {
  assert_alloc_region(_alloc_region != NULL, "not initialized properly");

  trace("retiring");
  HeapRegion* current = _alloc_region;
  size_t wasted_bytes = 0;
  if (current != _dummy_region) {
    wasted_bytes = retire_internal(current, fill_up);
    reset_alloc_region();
  }
  trace("retired");

  return wasted_bytes;
}
134
135
// Try to get a fresh region from the heap and satisfy an allocation request
// of word_size words out of it. On success the new region becomes the
// active alloc region and the address of the allocated block is returned;
// on failure NULL is returned and the dummy region stays installed.
HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert_alloc_region(_alloc_region == _dummy_region, "pre-condition");
  assert_alloc_region(_used_bytes_before == 0, "pre-condition");

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region == NULL) {
    trace("region allocation failed");
    return NULL;
  }

  new_alloc_region->reset_pre_dummy_top();
  // Snapshot the region's usage before our allocation so that
  // retire_internal() can later compute the bytes allocated through us.
  _used_bytes_before = new_alloc_region->used();
  HeapWord* result = allocate(new_alloc_region, word_size);
  assert_alloc_region(result != NULL, "the allocation should have succeeded");

  OrderAccess::storestore();
  // Note that we first perform the allocation and then we store the
  // region in _alloc_region. This is the reason why an active region
  // can never be empty.
  update_alloc_region(new_alloc_region);
  trace("region allocation successful");
  return result;
}
162
163
void G1AllocRegion::init() {
164
trace("initializing");
165
assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
166
assert_alloc_region(_dummy_region != NULL, "should have been set");
167
_alloc_region = _dummy_region;
168
_count = 0;
169
trace("initialized");
170
}
171
172
// Install an externally chosen, non-empty region as the active alloc
// region. Only legal while the dummy region is installed and nothing has
// been accounted yet (fresh allocator).
void G1AllocRegion::set(HeapRegion* alloc_region) {
  trace("setting");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
  assert_alloc_region(_alloc_region == _dummy_region &&
                      _used_bytes_before == 0 && _count == 0,
                      "pre-condition");

  // Snapshot current usage so retire_internal() can compute what was
  // allocated while this region is active.
  _used_bytes_before = alloc_region->used();
  _alloc_region = alloc_region;
  _count += 1;
  trace("set");
}
186
187
// Publish alloc_region as the new active region. Callers (see
// new_alloc_region_and_allocate()) have already allocated into it, which
// is what keeps the "active region is never empty" invariant.
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");

  _alloc_region = alloc_region;
  _count += 1;
  trace("updated");
}
197
198
// Deactivate this allocator: retire the active region without filling it
// up and clear _alloc_region. Returns the region that was active, or NULL
// when only the dummy region was installed.
HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* released_region = _alloc_region;
  retire(false /* fill_up */);
  assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
  if (released_region == _dummy_region) {
    return NULL;
  }
  return released_region;
}
207
208
#ifndef PRODUCT
209
void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) {
210
// All the calls to trace that set either just the size or the size
211
// and the result are considered part of detailed tracing and are
212
// skipped during other tracing.
213
214
Log(gc, alloc, region) log;
215
216
if (!log.is_debug()) {
217
return;
218
}
219
220
bool detailed_info = log.is_trace();
221
222
if ((actual_word_size == 0 && result == NULL) || detailed_info) {
223
ResourceMark rm;
224
LogStream ls_trace(log.trace());
225
LogStream ls_debug(log.debug());
226
outputStream* out = detailed_info ? &ls_trace : &ls_debug;
227
228
out->print("%s: %u ", _name, _count);
229
230
if (_alloc_region == NULL) {
231
out->print("NULL");
232
} else if (_alloc_region == _dummy_region) {
233
out->print("DUMMY");
234
} else {
235
out->print(HR_FORMAT, HR_FORMAT_PARAMS(_alloc_region));
236
}
237
238
out->print(" : %s", str);
239
240
if (detailed_info) {
241
if (result != NULL) {
242
out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
243
min_word_size, desired_word_size, actual_word_size, p2i(result));
244
} else if (min_word_size != 0) {
245
out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
246
}
247
}
248
out->cr();
249
}
250
}
251
#endif // PRODUCT
252
253
// Construct an inactive allocator; init() must be called before use.
// name is used for tracing, node_index selects the preferred NUMA node.
G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates,
                             uint node_index)
  : _alloc_region(NULL),
    _count(0),
    _used_bytes_before(0),
    _bot_updates(bot_updates),
    _name(name),
    _node_index(node_index)
 { }
263
264
// Request a new mutator alloc region from the heap, preferring this
// allocator's NUMA node; may return NULL.
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force, _node_index);
}
268
269
// Hand a finished mutator region back to the heap, reporting how many
// bytes were allocated in it while it was active.
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
273
274
// Activate the mutator allocator and start a fresh waste-accounting
// period (reported when release() is called).
void MutatorAllocRegion::init() {
  assert(_retained_alloc_region == NULL, "Pre-condition");
  G1AllocRegion::init();
  _wasted_bytes = 0;
}
279
280
// Decide whether region is worth keeping around for future mutator
// allocations: it must still fit at least a minimum-size TLAB and must
// have no less free space than the currently retained region (if any).
bool MutatorAllocRegion::should_retain(HeapRegion* region) {
  size_t available = region->free();
  // Too full to be useful for TLAB-sized allocations.
  if (available < MinTLABSize) {
    return false;
  }

  // Retain whichever candidate has more free space.
  bool beats_retained = (_retained_alloc_region == NULL) ||
                        (available >= _retained_alloc_region->free());
  return beats_retained;
}
293
294
// Retire the active mutator region, either keeping it as the retained
// region for future TLAB allocation or fully retiring it. Accumulates the
// resulting waste into _wasted_bytes and returns it.
size_t MutatorAllocRegion::retire(bool fill_up) {
  size_t waste = 0;
  trace("retiring");
  HeapRegion* current_region = get();
  if (current_region != NULL) {
    // Retain the current region if it fits a TLAB and has more
    // free than the currently retained region.
    if (should_retain(current_region)) {
      trace("mutator retained");
      // Swapping in a new retained region: fully retire (fill up) the
      // previously retained one first.
      if (_retained_alloc_region != NULL) {
        waste = retire_internal(_retained_alloc_region, true);
      }
      _retained_alloc_region = current_region;
    } else {
      waste = retire_internal(current_region, fill_up);
    }
    reset_alloc_region();
  }

  _wasted_bytes += waste;
  trace("retired");
  return waste;
}
317
318
// Total bytes in use across the currently active mutator region and the
// retained region; either (or both) may be absent.
size_t MutatorAllocRegion::used_in_alloc_regions() {
  size_t total = 0;
  HeapRegion* candidates[2] = { get(), _retained_alloc_region };
  for (uint i = 0; i < 2; i++) {
    if (candidates[i] != NULL) {
      total += candidates[i]->used();
    }
  }
  return total;
}
331
332
// Release both the active and the retained mutator regions and log the
// cumulative region/waste statistics for this accounting period. Returns
// the previously active region (or NULL).
HeapRegion* MutatorAllocRegion::release() {
  HeapRegion* ret = G1AllocRegion::release();

  // The retained alloc region must be retired and this must be
  // done after the above call to release the mutator alloc region,
  // since it might update the _retained_alloc_region member.
  if (_retained_alloc_region != NULL) {
    _wasted_bytes += retire_internal(_retained_alloc_region, false);
    _retained_alloc_region = NULL;
  }
  log_debug(gc, alloc, region)("Mutator Allocation stats, regions: %u, wasted size: " SIZE_FORMAT "%s (%4.1f%%)",
                               count(),
                               byte_size_in_proper_unit(_wasted_bytes),
                               proper_unit_for_byte_size(_wasted_bytes),
                               percent_of(_wasted_bytes, count() * HeapRegion::GrainBytes));
  return ret;
}
349
350
// Request a new GC alloc region for this allocator's purpose (e.g.
// survivor or old) on the preferred NUMA node. Forced allocation is not
// supported for GC alloc regions.
HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                 bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, _purpose, _node_index);
}
355
356
// Hand a finished GC alloc region back to the heap, tagged with this
// allocator's purpose, reporting the bytes allocated while it was active.
void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}
360
361
// Retire the active GC alloc region and record its end-of-region waste
// (in words) in the evacuation statistics.
size_t G1GCAllocRegion::retire(bool fill_up) {
  HeapRegion* region_being_retired = get();
  size_t waste_bytes = G1AllocRegion::retire(fill_up);
  // Do not count retirement of the dummy allocation region.
  if (region_being_retired != NULL) {
    _stats->add_region_end_waste(waste_bytes / HeapWordSize);
  }
  return waste_bytes;
}
370
371
// Release the old GC alloc region, first padding its top up to the next
// BOT card boundary with a dummy object where possible, so that a later
// allocation in this region does not straddle a card in a problematic way.
HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next, taking the
      // end of the region and the minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}
398
399