Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
48790 views
1
/*
2
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#if !defined(__clang_major__) && defined(__GNUC__)
26
#define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
27
#endif
28
29
#include "precompiled.hpp"
30
#include "classfile/metadataOnStackMark.hpp"
31
#include "code/codeCache.hpp"
32
#include "code/icBuffer.hpp"
33
#include "gc_implementation/g1/bufferingOopClosure.hpp"
34
#include "gc_implementation/g1/concurrentG1Refine.hpp"
35
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
36
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
37
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
38
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
39
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
40
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
41
#include "gc_implementation/g1/g1EvacFailure.hpp"
42
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
43
#include "gc_implementation/g1/g1Log.hpp"
44
#include "gc_implementation/g1/g1MarkSweep.hpp"
45
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
46
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
47
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
48
#include "gc_implementation/g1/g1RemSet.inline.hpp"
49
#include "gc_implementation/g1/g1RootProcessor.hpp"
50
#include "gc_implementation/g1/g1StringDedup.hpp"
51
#include "gc_implementation/g1/g1YCTypes.hpp"
52
#include "gc_implementation/g1/heapRegion.inline.hpp"
53
#include "gc_implementation/g1/heapRegionRemSet.hpp"
54
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
55
#include "gc_implementation/g1/vm_operations_g1.hpp"
56
#include "gc_implementation/shared/gcHeapSummary.hpp"
57
#include "gc_implementation/shared/gcTimer.hpp"
58
#include "gc_implementation/shared/gcTrace.hpp"
59
#include "gc_implementation/shared/gcTraceTime.hpp"
60
#include "gc_implementation/shared/isGCActiveMark.hpp"
61
#include "memory/allocation.hpp"
62
#include "memory/gcLocker.inline.hpp"
63
#include "memory/generationSpec.hpp"
64
#include "memory/iterator.hpp"
65
#include "memory/referenceProcessor.hpp"
66
#include "oops/oop.inline.hpp"
67
#include "oops/oop.pcgc.inline.hpp"
68
#include "runtime/orderAccess.inline.hpp"
69
#include "runtime/vmThread.hpp"
70
71
// Size threshold (in heap words) at or above which an object is treated as
// humongous. NOTE(review): initialized to 0 here and presumably assigned
// during heap initialization — the assignment site is not visible in this
// chunk; confirm before relying on the value.
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
72
73
// turn it on so that the contents of the young list (scan-only /
74
// to-be-collected) are printed at "strategic" points before / during
75
// / after the collection --- this is useful for debugging
76
#define YOUNG_LIST_VERBOSE 0
77
// CURRENT STATUS
78
// This file is under construction. Search for "FIXME".
79
80
// INVARIANTS/NOTES
81
//
82
// All allocation activity covered by the G1CollectedHeap interface is
83
// serialized by acquiring the HeapLock. This happens in mem_allocate
84
// and allocate_new_tlab, which are the "entry" points to the
85
// allocation code from the rest of the JVM. (Note that this does not
86
// apply to TLAB allocation, which is not part of this interface: it
87
// is done by clients of this interface.)
88
89
// Local to this file.
90
91
class RefineCardTableEntryClosure: public CardTableEntryClosure {
92
bool _concurrent;
93
public:
94
RefineCardTableEntryClosure() : _concurrent(true) { }
95
96
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
97
bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
98
// This path is executed by the concurrent refine or mutator threads,
99
// concurrently, and so we do not care if card_ptr contains references
100
// that point into the collection set.
101
assert(!oops_into_cset, "should be");
102
103
if (_concurrent && SuspendibleThreadSet::should_yield()) {
104
// Caller will actually yield.
105
return false;
106
}
107
// Otherwise, we finished successfully; return true.
108
return true;
109
}
110
111
void set_concurrent(bool b) { _concurrent = b; }
112
};
113
114
115
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
116
size_t _num_processed;
117
CardTableModRefBS* _ctbs;
118
int _histo[256];
119
120
public:
121
ClearLoggedCardTableEntryClosure() :
122
_num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
123
{
124
for (int i = 0; i < 256; i++) _histo[i] = 0;
125
}
126
127
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
128
unsigned char* ujb = (unsigned char*)card_ptr;
129
int ind = (int)(*ujb);
130
_histo[ind]++;
131
132
*card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
133
_num_processed++;
134
135
return true;
136
}
137
138
size_t num_processed() { return _num_processed; }
139
140
void print_histo() {
141
gclog_or_tty->print_cr("Card table value histogram:");
142
for (int i = 0; i < 256; i++) {
143
if (_histo[i] != 0) {
144
gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
145
}
146
}
147
}
148
};
149
150
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
151
private:
152
size_t _num_processed;
153
154
public:
155
RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
156
157
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
158
*card_ptr = CardTableModRefBS::dirty_card_val();
159
_num_processed++;
160
return true;
161
}
162
163
size_t num_processed() const { return _num_processed; }
164
};
165
166
// Construct an empty young list (no eden regions, no survivor regions)
// bound to the given heap.
YoungList::YoungList(G1CollectedHeap* g1h) :
    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
  guarantee(check_list_empty(false), "just making sure...");
}
171
172
// Push a newly-allocated eden region onto the front of the young list and
// register it with the collection policy (its eden index is the current
// list length).
void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
  ++_length;
}
182
183
// Prepend a survivor region to the survivor list. The tail pointer is
// maintained so the whole survivor list can later be spliced into the
// eden list (see reset_auxilary_lists()).
void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    // First survivor added: it is also the tail.
    _survivor_tail = hr;
  }
  _survivor_head = hr;
  ++_survivor_length;
}
194
195
// Walk the given singly-linked young region list, detaching each region
// and re-tagging it as old.
void YoungList::empty_list(HeapRegion* list) {
  for (HeapRegion* curr = list; curr != NULL; /* advanced in body */) {
    HeapRegion* next = curr->get_next_young_region();
    curr->set_next_young_region(NULL);
    curr->uninstall_surv_rate_group();
    // This is called before a Full GC and all the non-empty /
    // non-humongous regions at the end of the Full GC will end up as
    // old anyway.
    curr->set_old();
    curr = next;
  }
}
207
208
// Empty both the eden and survivor lists, re-tagging every region as old
// and resetting all cached lengths and sampling state. Called before a
// Full GC.
void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}
224
225
// Verify the eden list: every linked region must be tagged young and the
// cached _length must equal the actual number of linked regions. Logs
// details and returns false on any inconsistency.
bool YoungList::check_list_well_formed() {
  bool ret = true;

  uint length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young()) {
      gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
                             "incorrectly tagged (y: %d, surv: %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_survivor());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  // The walked length must agree with the cached length.
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %u entries, _length is %u",
                           length, _length);
  }

  return ret;
}
253
254
// Verify the young list is empty: zero length and NULL head, and — when
// check_sample is true — a zero last-sampled RS length as well. Logs
// details and returns false on any violation.
bool YoungList::check_list_empty(bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  return ret;
}
276
277
void
278
YoungList::rs_length_sampling_init() {
279
_sampled_rs_lengths = 0;
280
_curr = _head;
281
}
282
283
bool
284
YoungList::rs_length_sampling_more() {
285
return _curr != NULL;
286
}
287
288
// Sample the remembered-set size of the region under the cursor, fold it
// into the running total, and advance; when the walk completes, publish
// the total in _last_sampled_rs_lengths.
void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  size_t rs_length = _curr->rem_set()->occupied();

  _sampled_rs_lengths += rs_length;

  // The current region may not yet have been added to the
  // incremental collection set (it gets added when it is
  // retired as the current allocation region).
  if (_curr->in_collection_set()) {
    // Update the collection set policy information for this region
    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
  }

  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    // Walk finished: make the aggregate visible to consumers.
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
309
310
// After an evacuation pause: register the survivor regions with the
// policy / incremental collection set and splice the survivor list into
// the (empty) eden list so the survivors become the new young list.
void
YoungList::reset_auxilary_lists() {
  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

  int young_index_in_cset = 0;
  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
    young_index_in_cset += 1;
  }
  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  // Splice the survivor list into the young list.
  _head   = _survivor_head;
  _length = _survivor_length;
  if (_survivor_head != NULL) {
    assert(_survivor_tail != NULL, "cause it shouldn't be");
    assert(_survivor_length > 0, "invariant");
    _survivor_tail->set_next_young_region(NULL);
  }

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}
351
352
void YoungList::print() {
353
HeapRegion* lists[] = {_head, _survivor_head};
354
const char* names[] = {"YOUNG", "SURVIVOR"};
355
356
for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
357
gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
358
HeapRegion *curr = lists[list];
359
if (curr == NULL)
360
gclog_or_tty->print_cr(" empty");
361
while (curr != NULL) {
362
gclog_or_tty->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
363
HR_FORMAT_PARAMS(curr),
364
curr->prev_top_at_mark_start(),
365
curr->next_top_at_mark_start(),
366
curr->age_in_surv_rate_group_cond());
367
curr = curr->get_next_young_region();
368
}
369
}
370
371
gclog_or_tty->cr();
372
}
373
374
// Invalidate the "from card cache" entries covering the given range of
// regions; called when the region-to-space mapping changes.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  OtherRegionsTable::invalidate(start_idx, num_regions);
}
377
378
// Mapper callback: regions [start_idx, start_idx+num_regions) were just
// committed; invalidate the corresponding from-card-cache entries.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
383
384
// Lock-free push of hr onto the global dirty-cards-region list. A region
// "claims" list membership by CAS-installing a self pointer in its own
// next field; only the claiming thread then links it into the list head.
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      // We won the claim; CAS-loop the region into the list head.
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}
414
415
// Lock-free pop from the global dirty-cards-region list. Returns NULL
// when the list is empty; otherwise detaches and returns the head region
// (a region pointing to itself marks the end of the list).
HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}
436
437
#ifdef ASSERT
438
// A region is added to the collection set as it is retired
439
// so an address p can point to a region which will be in the
440
// collection set but has not yet been retired. This method
441
// therefore is only accurate during a GC pause after all
442
// regions have been retired. It is used for debugging
443
// to check if an nmethod has references to objects that can
444
// be move during a partial collection. Though it can be
445
// inaccurate, it is sufficient for G1 because the conservative
446
// implementation of is_scavengable() for G1 will indicate that
447
// all nmethods must be scanned during a partial collection.
448
bool G1CollectedHeap::is_in_partial_collection(const void* p) {
449
if (p == NULL) {
450
return false;
451
}
452
return heap_region_containing(p)->in_collection_set();
453
}
454
#endif
455
456
// Returns true if the reference points to an object that
457
// can move in an incremental collection.
458
bool G1CollectedHeap::is_scavengable(const void* p) {
459
HeapRegion* hr = heap_region_containing(p);
460
return !hr->isHumongous();
461
}
462
463
void G1CollectedHeap::check_ct_logs_at_safepoint() {
464
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
465
CardTableModRefBS* ct_bs = g1_barrier_set();
466
467
// Count the dirty cards at the start.
468
CountNonCleanMemRegionClosure count1(this);
469
ct_bs->mod_card_iterate(&count1);
470
int orig_count = count1.n();
471
472
// First clear the logged cards.
473
ClearLoggedCardTableEntryClosure clear;
474
dcqs.apply_closure_to_all_completed_buffers(&clear);
475
dcqs.iterate_closure_all_threads(&clear, false);
476
clear.print_histo();
477
478
// Now ensure that there's no dirty cards.
479
CountNonCleanMemRegionClosure count2(this);
480
ct_bs->mod_card_iterate(&count2);
481
if (count2.n() != 0) {
482
gclog_or_tty->print_cr("Card table has %d entries; %d originally",
483
count2.n(), orig_count);
484
}
485
guarantee(count2.n() == 0, "Card table should be clean.");
486
487
RedirtyLoggedCardTableEntryClosure redirty;
488
dcqs.apply_closure_to_all_completed_buffers(&redirty);
489
dcqs.iterate_closure_all_threads(&redirty, false);
490
gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
491
clear.num_processed(), orig_count);
492
guarantee(redirty.num_processed() == clear.num_processed(),
493
err_msg("Redirtied " SIZE_FORMAT " cards, bug cleared " SIZE_FORMAT,
494
redirty.num_processed(), clear.num_processed()));
495
496
CountNonCleanMemRegionClosure count3(this);
497
ct_bs->mod_card_iterate(&count3);
498
if (count3.n() != orig_count) {
499
gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
500
orig_count, count3.n());
501
guarantee(count3.n() >= orig_count, "Should have restored them all.");
502
}
503
}
504
505
// Private class members.
506
507
// The single G1CollectedHeap instance (returned by G1CollectedHeap::heap()).
// NOTE(review): presumably installed during heap construction — the
// assignment is not visible in this chunk; confirm at the constructor.
G1CollectedHeap* G1CollectedHeap::_g1h;
508
509
// Private methods.
510
511
// Try to allocate a free region from the secondary free list (regions
// freed concurrently by the cleanup phase). Holds SecondaryFreeList_lock
// and waits while more regions are still coming. Returns NULL when the
// secondary free list is exhausted and no more regions are coming.
HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "secondary_free_list has %u entries",
                               _secondary_free_list.length());
      }
      // It looks as if there are free regions available on the
      // secondary_free_list. Let's move them to the free_list and try
      // again to allocate from it.
      append_secondary_free_list();

      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
             "empty we should have moved at least one entry to the free_list");
      HeapRegion* res = _hrm.allocate_free_region(is_old);
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "allocated " HR_FORMAT " from secondary_free_list",
                               HR_FORMAT_PARAMS(res));
      }
      return res;
    }

    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved on
    // the secondary_free_list.
    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                           "could not allocate from secondary_free_list");
  }
  return NULL;
}
549
550
// Allocate a single free region, consulting the secondary free list and,
// when do_expand is set, expanding the heap on failure. Returns NULL if
// no region can be obtained.
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
  assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res;
  if (G1StressConcRegionFreeing) {
    // Stress mode: prefer the secondary free list to exercise that path.
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "forced to look at the secondary_free_list");
      }
      res = new_region_try_secondary_free_list(is_old);
      if (res != NULL) {
        return res;
      }
    }
  }

  res = _hrm.allocate_free_region(is_old);

  if (res == NULL) {
    if (G1ConcRegionFreeingVerbose) {
      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                             "res == NULL, trying the secondary_free_list");
    }
    res = new_region_try_secondary_free_list(is_old);
  }
  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint. If this assumption changes we might have to
    // reconsider the use of _expand_heap_after_alloc_failure.
    assert(SafepointSynchronize::is_at_safepoint(), "invariant");

    ergo_verbose1(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("region allocation request failed")
                  ergo_format_byte("allocation request"),
                  word_size * HeapWordSize);
    if (expand(word_size * HeapWordSize)) {
      // Given that expand() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for NULL.
      res = _hrm.allocate_free_region(is_old);
    } else {
      // Expansion failed: don't retry expansion on subsequent failures.
      _expand_heap_after_alloc_failure = false;
    }
  }
  return res;
}
602
603
// Initialize the regions [first, first+num_regions) that were just
// allocated for a humongous object of word_size words: set up the
// "starts humongous" region, any "continues humongous" regions, the BOT
// and the top fields, in an order that is safe w.r.t. concurrent
// refinement threads. Returns the address of the new object's header.
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size,
                                                           AllocationContext_t context) {
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series + 1.
  uint last = first + num_regions;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new end of the first region in the series that
  // should also match the end of the last region in the series.
  HeapWord* new_end = new_obj + word_size_sum;
  // This will be the new top of the first region that will reflect
  // this allocation.
  HeapWord* new_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_startsHumongous(new_top, new_end);
  first_hr->set_allocation_context(context);
  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i < last; ++i) {
    hr = region_at(i);
    hr->set_continuesHumongous(first_hr);
    hr->set_allocation_context(context);
  }
  // If we have "continues humongous" regions (hr != NULL), then the
  // end of the last one should match new_end.
  assert(hr == NULL || hr->end() == new_end, "sanity");

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now that the BOT and the object header have been initialized,
  // we can update top of the "starts humongous" region.
  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
         "new_top should be in this region");
  first_hr->set_top(new_top);
  if (_hr_printer.is_active()) {
    HeapWord* bottom = first_hr->bottom();
    HeapWord* end = first_hr->orig_end();
    if ((first + 1) == last) {
      // the series has a single humongous region
      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
    } else {
      // the series has more than one humongous regions
      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
    }
  }

  // Now, we will update the top fields of the "continues humongous"
  // regions. The reason we need to do this is that, otherwise,
  // these regions would look empty and this will confuse parts of
  // G1. For example, the code that looks for a consecutive number
  // of empty regions will consider them empty and try to
  // re-allocate them. We can extend is_empty() to also include
  // !continuesHumongous(), but it is easier to just update the top
  // fields here. The way we set top for all regions (i.e., top ==
  // end for all regions but the last one, top == new_top for the
  // last one) is actually used when we will free up the humongous
  // region in free_humongous_region().
  hr = NULL;
  for (uint i = first + 1; i < last; ++i) {
    hr = region_at(i);
    if ((i + 1) == last) {
      // last continues humongous region
      assert(hr->bottom() < new_top && new_top <= hr->end(),
             "new_top should fall on this region");
      hr->set_top(new_top);
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
    } else {
      // not last one
      assert(new_top > hr->end(), "new_top should be above this region");
      hr->set_top(hr->end());
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
    }
  }
  // If we have continues humongous regions (hr != NULL), then the
  // end of the last one should match new_end and its top should
  // match new_top.
  assert(hr == NULL ||
         (hr->end() == new_end && hr->top() == new_top), "sanity");
  check_bitmaps("Humongous Region Allocation", first_hr);

  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
  _allocator->increase_used(first_hr->used());
  _humongous_set.add(first_hr);

  return new_obj;
}
738
739
// Allocate space for a humongous object of word_size words.
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
// Returns the object's address, or NULL on failure.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  // Number of regions needed to cover word_size.
  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);

  if (obj_regions == 1) {
    // Only one region to allocate, try to use a fast path by directly allocating
    // from the free lists. Do not try to expand here, we will potentially do that
    // later.
    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrm_index();
    }
  } else {
    // We can't allocate humongous regions spanning more than one region while
    // cleanupComplete() is running, since some of the regions we find to be
    // empty might not yet be added to the free list. It is not straightforward
    // to know in which list they are on so that we can remove them. We only
    // need to do this if we need to allocate more than one region to satisfy the
    // current humongous allocation request. If we are only allocating one region
    // we use the one-region region allocation code (see above), that already
    // potentially waits for regions from the secondary free list.
    wait_while_free_regions_coming();
    append_secondary_free_list_if_not_empty_with_lock();

    // Policy: Try only empty regions (i.e. already committed first). Maybe we
    // are lucky enough to find some.
    first = _hrm.find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    }
  }

  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If so, try expansion.
    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
      ergo_verbose1(ErgoHeapSizing,
                    "attempt heap expansion",
                    ergo_format_reason("humongous allocation request failed")
                    ergo_format_byte("allocation request"),
                    word_size * HeapWordSize);

      _hrm.expand_at(first, obj_regions);
      g1_policy()->record_new_heap_size(num_regions());

#ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
        HeapRegion* hr = region_at(i);
        assert(hr->is_free(), "sanity");
        assert(hr->is_empty(), "sanity");
        assert(is_on_master_free_list(hr), "sanity");
      }
#endif
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
                                                       word_size, context);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    g1mm()->update_sizes();
  }

  verify_region_sets_optional();

  return result;
}
825
826
// Allocate a new TLAB of word_size words. Runs outside a safepoint and
// without the Heap_lock held; humongous sizes are not permitted here.
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "we do not allow humongous TLABs");

  // The caller does not need the GC count or the GC-locker retry count;
  // provide throwaway locations for attempt_allocation() to write into.
  uint gc_count_unused;
  uint gclocker_retries_unused = 0;
  return attempt_allocation(word_size, &gc_count_unused, &gclocker_retries_unused);
}
834
835
// Entry point for object allocation from the rest of the JVM (both
// ordinary and humongous sizes). Retries the fast allocation paths and,
// when they fail, schedules a collection through a VM operation; returns
// NULL only when the allocation cannot be satisfied even after GC (or
// the GC-locker retry budget is exhausted).
HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool* gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    uint gc_count_before;

    HeapWord* result = NULL;
    if (!isHumongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
    } else {
      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
    }
    if (result != NULL) {
      return result;
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(gc_count_before, word_size);
    op.set_allocation_context(AllocationContext::current());

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);

    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !isHumongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        dirty_young_block(result, word_size);
      }
      return result;
    } else {
      // Give up once the GC-locker retry budget has been exceeded.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        return NULL;
      }
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}
891
892
// Second-level (slow-path) allocation for non-humongous requests, called
// after attempt_allocation() failed. Loops: try under the Heap_lock, then
// either schedule a collection pause or stall on the GC locker, then retry.
// Returns NULL only when a successfully-scheduled pause fails to allocate.
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                   AllocationContext_t context,
                                                   uint* gc_count_before_ret,
                                                   uint* gclocker_retry_count_ret) {
  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);
      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                    false /* bot_updates */);
      if (result != NULL) {
        return result;
      }

      // If we reach here, attempt_allocation_locked() above failed to
      // allocate a new region. So the mutator alloc region should be NULL.
      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");

      if (GC_locker::is_active_and_needs_gc()) {
        if (g1_policy()->can_expand_young_list()) {
          // No need for an ergo verbose message here,
          // can_expand_young_list() does this when it returns true.
          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
                                                                                       false /* bot_updates */);
          if (result != NULL) {
            return result;
          }
        }
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GC_locker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GC_locker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                           false /* bot_updates */);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_slow() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}
// Slow-path allocation for humongous requests. Mirrors the structure of
// attempt_allocation_slow() (see the note below); may trigger a concurrent
// marking cycle before attempting the allocation.
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                        uint* gc_count_before_ret,
                                                        uint* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(isHumongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
                                           word_size)) {
    collect(GCCause::_g1_humongous_allocation);
  }

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
      result = humongous_obj_allocate(word_size, AllocationContext::current());
      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GC_locker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      // If we failed to allocate the humongous object, we should try to
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.

      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GC_locker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. Give a
    // warning if we seem to be looping forever.

    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_humongous() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}
// Allocation attempt performed inside a safepoint by the VM thread. Does a
// single attempt (no retry loop); humongous allocations may request that a
// concurrent marking cycle be initiated.
HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                                           AllocationContext_t context,
                                                           bool expect_null_mutator_alloc_region) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
         !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!isHumongous(word_size)) {
    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                false /* bot_updates */);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      g1_policy()->set_initiate_conc_mark_if_possible();
    }
    return result;
  }

  ShouldNotReachHere();
}
class PostMCRemSetClearClosure: public HeapRegionClosure {
1150
G1CollectedHeap* _g1h;
1151
ModRefBarrierSet* _mr_bs;
1152
public:
1153
PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1154
_g1h(g1h), _mr_bs(mr_bs) {}
1155
1156
bool doHeapRegion(HeapRegion* r) {
1157
HeapRegionRemSet* hrrs = r->rem_set();
1158
1159
if (r->continuesHumongous()) {
1160
// We'll assert that the strong code root list and RSet is empty
1161
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1162
assert(hrrs->occupied() == 0, "RSet should be empty");
1163
return false;
1164
}
1165
1166
_g1h->reset_gc_time_stamps(r);
1167
hrrs->clear();
1168
// You might think here that we could clear just the cards
1169
// corresponding to the used region. But no: if we leave a dirty card
1170
// in a region we might allocate into, then it would prevent that card
1171
// from being enqueued, and cause it to be missed.
1172
// Re: the performance cost: we shouldn't be doing full GC anyway!
1173
_mr_bs->clear(MemRegion(r->bottom(), r->end()));
1174
1175
return false;
1176
}
1177
};
1178
1179
void G1CollectedHeap::clear_rsets_post_compaction() {
1180
PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1181
heap_region_iterate(&rs_clear);
1182
}
1183
1184
class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1185
G1CollectedHeap* _g1h;
1186
UpdateRSOopClosure _cl;
1187
int _worker_i;
1188
public:
1189
RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1190
_cl(g1->g1_rem_set(), worker_i),
1191
_worker_i(worker_i),
1192
_g1h(g1)
1193
{ }
1194
1195
bool doHeapRegion(HeapRegion* r) {
1196
if (!r->continuesHumongous()) {
1197
_cl.set_from(r);
1198
r->oop_iterate(&_cl);
1199
}
1200
return false;
1201
}
1202
};
1203
1204
class ParRebuildRSTask: public AbstractGangTask {
1205
G1CollectedHeap* _g1;
1206
public:
1207
ParRebuildRSTask(G1CollectedHeap* g1)
1208
: AbstractGangTask("ParRebuildRSTask"),
1209
_g1(g1)
1210
{ }
1211
1212
void work(uint worker_id) {
1213
RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1214
_g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1215
_g1->workers()->active_workers(),
1216
HeapRegion::RebuildRSClaimValue);
1217
}
1218
};
1219
1220
class PostCompactionPrinterClosure: public HeapRegionClosure {
1221
private:
1222
G1HRPrinter* _hr_printer;
1223
public:
1224
bool doHeapRegion(HeapRegion* hr) {
1225
assert(!hr->is_young(), "not expecting to find young regions");
1226
if (hr->is_free()) {
1227
// We only generate output for non-empty regions.
1228
} else if (hr->startsHumongous()) {
1229
if (hr->region_num() == 1) {
1230
// single humongous region
1231
_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1232
} else {
1233
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1234
}
1235
} else if (hr->continuesHumongous()) {
1236
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1237
} else if (hr->is_old()) {
1238
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
1239
} else {
1240
ShouldNotReachHere();
1241
}
1242
return false;
1243
}
1244
1245
PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1246
: _hr_printer(hr_printer) { }
1247
};
1248
1249
void G1CollectedHeap::print_hrm_post_compaction() {
1250
PostCompactionPrinterClosure cl(hr_printer());
1251
heap_region_iterate(&cl);
1252
}
1253
1254
bool G1CollectedHeap::do_collection(bool explicit_gc,
1255
bool clear_all_soft_refs,
1256
size_t word_size) {
1257
assert_at_safepoint(true /* should_be_vm_thread */);
1258
1259
if (GC_locker::check_active_before_gc()) {
1260
return false;
1261
}
1262
1263
STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1264
gc_timer->register_gc_start();
1265
1266
SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1267
gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1268
1269
SvcGCMarker sgcm(SvcGCMarker::FULL);
1270
ResourceMark rm;
1271
1272
print_heap_before_gc();
1273
trace_heap_before_gc(gc_tracer);
1274
1275
size_t metadata_prev_used = MetaspaceAux::used_bytes();
1276
1277
verify_region_sets_optional();
1278
1279
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1280
collector_policy()->should_clear_all_soft_refs();
1281
1282
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1283
1284
{
1285
IsGCActiveMark x;
1286
1287
// Timing
1288
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1289
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1290
1291
{
1292
GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1293
TraceCollectorStats tcs(g1mm()->full_collection_counters());
1294
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1295
1296
double start = os::elapsedTime();
1297
g1_policy()->record_full_collection_start();
1298
1299
// Note: When we have a more flexible GC logging framework that
1300
// allows us to add optional attributes to a GC log record we
1301
// could consider timing and reporting how long we wait in the
1302
// following two methods.
1303
wait_while_free_regions_coming();
1304
// If we start the compaction before the CM threads finish
1305
// scanning the root regions we might trip them over as we'll
1306
// be moving objects / updating references. So let's wait until
1307
// they are done. By telling them to abort, they should complete
1308
// early.
1309
_cm->root_regions()->abort();
1310
_cm->root_regions()->wait_until_scan_finished();
1311
append_secondary_free_list_if_not_empty_with_lock();
1312
1313
gc_prologue(true);
1314
increment_total_collections(true /* full gc */);
1315
increment_old_marking_cycles_started();
1316
1317
assert(used() == recalculate_used(), "Should be equal");
1318
1319
verify_before_gc();
1320
1321
check_bitmaps("Full GC Start");
1322
pre_full_gc_dump(gc_timer);
1323
1324
COMPILER2_PRESENT(DerivedPointerTable::clear());
1325
1326
// Disable discovery and empty the discovered lists
1327
// for the CM ref processor.
1328
ref_processor_cm()->disable_discovery();
1329
ref_processor_cm()->abandon_partial_discovery();
1330
ref_processor_cm()->verify_no_references_recorded();
1331
1332
// Abandon current iterations of concurrent marking and concurrent
1333
// refinement, if any are in progress. We have to do this before
1334
// wait_until_scan_finished() below.
1335
concurrent_mark()->abort();
1336
1337
// Make sure we'll choose a new allocation region afterwards.
1338
_allocator->release_mutator_alloc_region();
1339
_allocator->abandon_gc_alloc_regions();
1340
g1_rem_set()->cleanupHRRS();
1341
1342
// We should call this after we retire any currently active alloc
1343
// regions so that all the ALLOC / RETIRE events are generated
1344
// before the start GC event.
1345
_hr_printer.start_gc(true /* full */, (size_t) total_collections());
1346
1347
// We may have added regions to the current incremental collection
1348
// set between the last GC or pause and now. We need to clear the
1349
// incremental collection set and then start rebuilding it afresh
1350
// after this full GC.
1351
abandon_collection_set(g1_policy()->inc_cset_head());
1352
g1_policy()->clear_incremental_cset();
1353
g1_policy()->stop_incremental_cset_building();
1354
1355
tear_down_region_sets(false /* free_list_only */);
1356
g1_policy()->set_gcs_are_young(true);
1357
1358
// See the comments in g1CollectedHeap.hpp and
1359
// G1CollectedHeap::ref_processing_init() about
1360
// how reference processing currently works in G1.
1361
1362
// Temporarily make discovery by the STW ref processor single threaded (non-MT).
1363
ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1364
1365
// Temporarily clear the STW ref processor's _is_alive_non_header field.
1366
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1367
1368
ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1369
ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1370
1371
// Do collection work
1372
{
1373
HandleMark hm; // Discard invalid handles created during gc
1374
G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1375
}
1376
1377
assert(num_free_regions() == 0, "we should not have added any free regions");
1378
rebuild_region_sets(false /* free_list_only */);
1379
1380
// Enqueue any discovered reference objects that have
1381
// not been removed from the discovered lists.
1382
ref_processor_stw()->enqueue_discovered_references();
1383
1384
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1385
1386
MemoryService::track_memory_usage();
1387
1388
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1389
ref_processor_stw()->verify_no_references_recorded();
1390
1391
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
1392
ClassLoaderDataGraph::purge();
1393
MetaspaceAux::verify_metrics();
1394
1395
// Note: since we've just done a full GC, concurrent
1396
// marking is no longer active. Therefore we need not
1397
// re-enable reference discovery for the CM ref processor.
1398
// That will be done at the start of the next marking cycle.
1399
assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1400
ref_processor_cm()->verify_no_references_recorded();
1401
1402
reset_gc_time_stamp();
1403
// Since everything potentially moved, we will clear all remembered
1404
// sets, and clear all cards. Later we will rebuild remembered
1405
// sets. We will also reset the GC time stamps of the regions.
1406
clear_rsets_post_compaction();
1407
check_gc_time_stamps();
1408
1409
// Resize the heap if necessary.
1410
resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1411
1412
if (_hr_printer.is_active()) {
1413
// We should do this after we potentially resize the heap so
1414
// that all the COMMIT / UNCOMMIT events are generated before
1415
// the end GC event.
1416
1417
print_hrm_post_compaction();
1418
_hr_printer.end_gc(true /* full */, (size_t) total_collections());
1419
}
1420
1421
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1422
if (hot_card_cache->use_cache()) {
1423
hot_card_cache->reset_card_counts();
1424
hot_card_cache->reset_hot_cache();
1425
}
1426
1427
// Rebuild remembered sets of all regions.
1428
if (G1CollectedHeap::use_parallel_gc_threads()) {
1429
uint n_workers =
1430
AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1431
workers()->active_workers(),
1432
Threads::number_of_non_daemon_threads());
1433
assert(UseDynamicNumberOfGCThreads ||
1434
n_workers == workers()->total_workers(),
1435
"If not dynamic should be using all the workers");
1436
workers()->set_active_workers(n_workers);
1437
// Set parallel threads in the heap (_n_par_threads) only
1438
// before a parallel phase and always reset it to 0 after
1439
// the phase so that the number of parallel threads does
1440
// no get carried forward to a serial phase where there
1441
// may be code that is "possibly_parallel".
1442
set_par_threads(n_workers);
1443
1444
ParRebuildRSTask rebuild_rs_task(this);
1445
assert(check_heap_region_claim_values(
1446
HeapRegion::InitialClaimValue), "sanity check");
1447
assert(UseDynamicNumberOfGCThreads ||
1448
workers()->active_workers() == workers()->total_workers(),
1449
"Unless dynamic should use total workers");
1450
// Use the most recent number of active workers
1451
assert(workers()->active_workers() > 0,
1452
"Active workers not properly set");
1453
set_par_threads(workers()->active_workers());
1454
workers()->run_task(&rebuild_rs_task);
1455
set_par_threads(0);
1456
assert(check_heap_region_claim_values(
1457
HeapRegion::RebuildRSClaimValue), "sanity check");
1458
reset_heap_region_claim_values();
1459
} else {
1460
RebuildRSOutOfRegionClosure rebuild_rs(this);
1461
heap_region_iterate(&rebuild_rs);
1462
}
1463
1464
// Rebuild the strong code root lists for each region
1465
rebuild_strong_code_roots();
1466
1467
// Purge code root memory
1468
purge_code_root_memory();
1469
1470
if (true) { // FIXME
1471
MetaspaceGC::compute_new_size();
1472
}
1473
1474
#ifdef TRACESPINNING
1475
ParallelTaskTerminator::print_termination_counts();
1476
#endif
1477
1478
// Discard all rset updates
1479
JavaThread::dirty_card_queue_set().abandon_logs();
1480
assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1481
1482
_young_list->reset_sampled_info();
1483
// At this point there should be no regions in the
1484
// entire heap tagged as young.
1485
assert(check_young_list_empty(true /* check_heap */),
1486
"young list should be empty at this point");
1487
1488
// Update the number of full collections that have been completed.
1489
increment_old_marking_cycles_completed(false /* concurrent */);
1490
1491
_hrm.verify_optional();
1492
verify_region_sets_optional();
1493
1494
verify_after_gc();
1495
1496
// Clear the previous marking bitmap, if needed for bitmap verification.
1497
// Note we cannot do this when we clear the next marking bitmap in
1498
// ConcurrentMark::abort() above since VerifyDuringGC verifies the
1499
// objects marked during a full GC against the previous bitmap.
1500
// But we need to clear it before calling check_bitmaps below since
1501
// the full GC has compacted objects and updated TAMS but not updated
1502
// the prev bitmap.
1503
if (G1VerifyBitmaps) {
1504
((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1505
}
1506
check_bitmaps("Full GC End");
1507
1508
// Start a new incremental collection set for the next pause
1509
assert(g1_policy()->collection_set() == NULL, "must be");
1510
g1_policy()->start_incremental_cset_building();
1511
1512
clear_cset_fast_test();
1513
1514
_allocator->init_mutator_alloc_region();
1515
1516
double end = os::elapsedTime();
1517
g1_policy()->record_full_collection_end();
1518
1519
if (G1Log::fine()) {
1520
g1_policy()->print_heap_transition();
1521
}
1522
1523
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
1524
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1525
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1526
// before any GC notifications are raised.
1527
g1mm()->update_sizes();
1528
1529
gc_epilogue(true);
1530
}
1531
1532
if (G1Log::finer()) {
1533
g1_policy()->print_detailed_heap_transition(true /* full */);
1534
}
1535
1536
print_heap_after_gc();
1537
trace_heap_after_gc(gc_tracer);
1538
1539
post_full_gc_dump(gc_timer);
1540
1541
gc_timer->register_gc_end();
1542
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1543
}
1544
1545
return true;
1546
}
1547
1548
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1549
// do_collection() will return whether it succeeded in performing
1550
// the GC. Currently, there is no facility on the
1551
// do_full_collection() API to notify the caller than the collection
1552
// did not succeed (e.g., because it was locked out by the GC
1553
// locker). So, right now, we'll ignore the return value.
1554
bool dummy = do_collection(true, /* explicit_gc */
1555
clear_all_soft_refs,
1556
0 /* word_size */);
1557
}
1558
1559
// This code is mostly copied from TenuredGeneration.
1560
void
1561
G1CollectedHeap::
1562
resize_if_necessary_after_full_collection(size_t word_size) {
1563
// Include the current allocation, if any, and bytes that will be
1564
// pre-allocated to support collections, as "used".
1565
const size_t used_after_gc = used();
1566
const size_t capacity_after_gc = capacity();
1567
const size_t free_after_gc = capacity_after_gc - used_after_gc;
1568
1569
// This is enforced in arguments.cpp.
1570
assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1571
"otherwise the code below doesn't make sense");
1572
1573
// We don't have floating point command-line arguments
1574
const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1575
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1576
const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1577
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1578
1579
const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1580
const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1581
1582
// We have to be careful here as these two calculations can overflow
1583
// 32-bit size_t's.
1584
double used_after_gc_d = (double) used_after_gc;
1585
double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1586
double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1587
1588
// Let's make sure that they are both under the max heap size, which
1589
// by default will make them fit into a size_t.
1590
double desired_capacity_upper_bound = (double) max_heap_size;
1591
minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1592
desired_capacity_upper_bound);
1593
maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1594
desired_capacity_upper_bound);
1595
1596
// We can now safely turn them into size_t's.
1597
size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1598
size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1599
1600
// This assert only makes sense here, before we adjust them
1601
// with respect to the min and max heap size.
1602
assert(minimum_desired_capacity <= maximum_desired_capacity,
1603
err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
1604
"maximum_desired_capacity = " SIZE_FORMAT,
1605
minimum_desired_capacity, maximum_desired_capacity));
1606
1607
// Should not be greater than the heap max size. No need to adjust
1608
// it with respect to the heap min size as it's a lower bound (i.e.,
1609
// we'll try to make the capacity larger than it, not smaller).
1610
minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1611
// Should not be less than the heap min size. No need to adjust it
1612
// with respect to the heap max size as it's an upper bound (i.e.,
1613
// we'll try to make the capacity smaller than it, not greater).
1614
maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1615
1616
if (capacity_after_gc < minimum_desired_capacity) {
1617
// Don't expand unless it's significant
1618
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1619
ergo_verbose4(ErgoHeapSizing,
1620
"attempt heap expansion",
1621
ergo_format_reason("capacity lower than "
1622
"min desired capacity after Full GC")
1623
ergo_format_byte("capacity")
1624
ergo_format_byte("occupancy")
1625
ergo_format_byte_perc("min desired capacity"),
1626
capacity_after_gc, used_after_gc,
1627
minimum_desired_capacity, (double) MinHeapFreeRatio);
1628
expand(expand_bytes);
1629
1630
// No expansion, now see if we want to shrink
1631
} else if (capacity_after_gc > maximum_desired_capacity) {
1632
// Capacity too large, compute shrinking size
1633
size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1634
ergo_verbose4(ErgoHeapSizing,
1635
"attempt heap shrinking",
1636
ergo_format_reason("capacity higher than "
1637
"max desired capacity after Full GC")
1638
ergo_format_byte("capacity")
1639
ergo_format_byte("occupancy")
1640
ergo_format_byte_perc("max desired capacity"),
1641
capacity_after_gc, used_after_gc,
1642
maximum_desired_capacity, (double) MaxHeapFreeRatio);
1643
shrink(shrink_bytes);
1644
}
1645
}
1646
1647
1648
// Last-ditch allocation path run at a safepoint after a pause failed to
// allocate: try again, then expand, then full GC, then full GC clearing all
// soft references, retrying the allocation after each step. *succeeded is
// set to false only if a required full GC could not be performed.
HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                           AllocationContext_t context,
                                           bool* succeeded) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  *succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result =
    attempt_allocation_at_safepoint(word_size,
                                    context,
                                    false /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses. Therefore, at least for now, we'll favor
  // expansion over collection. (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)
  result = expand_and_allocate(word_size, context);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Expansion didn't work, we'll try to do a Full GC.
  bool gc_succeeded = do_collection(false, /* explicit_gc */
                                    false, /* clear_all_soft_refs */
                                    word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation
  result = attempt_allocation_at_safepoint(word_size,
                                           context,
                                           true /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Then, try a Full GC that will collect all soft references.
  gc_succeeded = do_collection(false, /* explicit_gc */
                               true,  /* clear_all_soft_refs */
                               word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation once more
  result = attempt_allocation_at_safepoint(word_size,
                                           context,
                                           true /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  assert(*succeeded, "sanity");
  return NULL;
}
// Attempting to expand the heap sufficiently
1723
// to support an allocation of the given "word_size". If
1724
// successful, perform the allocation and return the address of the
1725
// allocated block, or else "NULL".
1726
1727
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1728
assert_at_safepoint(true /* should_be_vm_thread */);
1729
1730
verify_region_sets_optional();
1731
1732
size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1733
ergo_verbose1(ErgoHeapSizing,
1734
"attempt heap expansion",
1735
ergo_format_reason("allocation request failed")
1736
ergo_format_byte("allocation request"),
1737
word_size * HeapWordSize);
1738
if (expand(expand_bytes)) {
1739
_hrm.verify_optional();
1740
verify_region_sets_optional();
1741
return attempt_allocation_at_safepoint(word_size,
1742
context,
1743
false /* expect_null_mutator_alloc_region */);
1744
}
1745
return NULL;
1746
}
1747
1748
// Expands the committed heap by at least expand_bytes, rounded up first
// to the OS page size and then to a whole number of heap regions.
// Returns true iff at least one region was actually committed.
bool G1CollectedHeap::expand(size_t expand_bytes) {
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  ergo_verbose2(ErgoHeapSizing,
                "expand the heap",
                ergo_format_byte("requested expansion amount")
                ergo_format_byte("attempted expansion amount"),
                expand_bytes, aligned_expand_bytes);

  if (is_maximal_no_gc()) {
    ergo_verbose0(ErgoHeapSizing,
                  "did not expand the heap",
                  ergo_format_reason("heap already fully expanded"));
    return false;
  }

  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
  assert(regions_to_expand > 0, "Must expand by at least one region");

  uint expanded_by = _hrm.expand_by(regions_to_expand);

  if (expanded_by > 0) {
    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
    g1_policy()->record_new_heap_size(num_regions());
  } else {
    ergo_verbose0(ErgoHeapSizing,
                  "did not expand the heap",
                  ergo_format_reason("heap expansion operation failed"));
    // The expansion of the virtual storage space was unsuccessful.
    // Let's see if it was because we ran out of swap.
    if (G1ExitOnExpansionFailure &&
        _hrm.available() >= regions_to_expand) {
      // We had head room...
      vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
    }
  }
  // Report success only if something was actually committed. The previous
  // code returned "regions_to_expand > 0", which is unconditionally true
  // at this point (see the assert above), so callers were told the
  // expansion succeeded even when _hrm.expand_by() committed no regions.
  return expanded_by > 0;
}
1788
1789
// Uncommits up to shrink_bytes from the heap, rounded down first to the
// OS page size and then to a whole number of heap regions.
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  // Derive the region count from the aligned amount. The previous code
  // divided the raw shrink_bytes; the result is the same whenever
  // GrainBytes is a multiple of the page size, but using the aligned
  // value is correct by construction and matches the
  // "aligned shrinking amount" reported below.
  uint num_regions_to_remove = (uint)(aligned_shrink_bytes / HeapRegion::GrainBytes);

  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

  ergo_verbose3(ErgoHeapSizing,
                "shrink the heap",
                ergo_format_byte("requested shrinking amount")
                ergo_format_byte("aligned shrinking amount")
                ergo_format_byte("attempted shrinking amount"),
                shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  if (num_regions_removed > 0) {
    g1_policy()->record_new_heap_size(num_regions());
  } else {
    ergo_verbose0(ErgoHeapSizing,
                  "did not shrink the heap",
                  ergo_format_reason("heap shrinking operation failed"));
  }
}
1813
1814
// Shrinks the heap at the end of a Full GC. The region sets are torn
// down and rebuilt around the actual uncommit so that the free list
// never references regions that are being removed.
void G1CollectedHeap::shrink(size_t shrink_bytes) {
  verify_region_sets_optional();

  // We should only reach here at the end of a Full GC which means we
  // should not be holding to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
  _allocator->abandon_gc_alloc_regions();

  // Instead of tearing down / rebuilding the free lists here, we
  // could instead use the remove_all_pending() method on free_list to
  // remove only the ones that we need to remove.
  tear_down_region_sets(true /* free_list_only */);
  shrink_helper(shrink_bytes);
  rebuild_region_sets(true /* free_list_only */);

  _hrm.verify_optional();
  verify_region_sets_optional();
}
1832
1833
// Public methods.
1834
1835
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1836
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1837
#endif // _MSC_VER
1838
1839
1840
// Constructs the heap object itself; heavy-weight initialization (heap
// reservation, concurrent mark, refinement) happens later in initialize().
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _dirty_card_queue_set(false),
  _into_cset_dirty_card_queue_set(false),
  _is_alive_closure_cm(this),
  _is_alive_closure_stw(this),
  _ref_processor_cm(NULL),
  _ref_processor_stw(NULL),
  _bot_shared(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL),
  _g1mm(NULL),
  _refine_cte_cl(NULL),
  _full_collection(false),
  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  _humongous_reclaim_candidates(),
  _has_humongous_reclaim_candidates(false),
  _free_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _survivor_plab_stats(YoungPLABSize, PLABWeight),
  _old_plab_stats(OldPLABSize, PLABWeight),
  _expand_heap_after_alloc_failure(true),
  _surviving_young_words(NULL),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _concurrent_cycle_started(false),
  _heap_summary_sent(false),
  _in_cset_fast_test(),
  _dirty_cards_region_list(NULL),
  _worker_cset_start_region(NULL),
  _worker_cset_start_region_time_stamp(NULL),
  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {

  _g1h = this;

  _allocator = G1Allocator::create_allocator(_g1h);
  // Objects of at least half a region are treated as humongous.
  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

  // One reference-to-scan queue per GC worker thread (at least one).
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  // Per-worker arrays, sized by the number of queues.
  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
  _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
    // Placement-new: the array above was raw-allocated, not constructed.
    ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
  }
  clear_cset_start_regions();

  // Initialize the G1EvacuationFailureALot counters and flags.
  NOT_PRODUCT(reset_evacuation_should_fail();)

  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
1909
1910
// Reserves backing storage for an auxiliary GC data structure (BOT, card
// table, bitmaps, ...) and wraps it in a region-to-space mapper using the
// given heap-address-to-structure translation factor.
G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
                                                                 size_t size,
                                                                 size_t translation_factor) {
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  // Allocate a new reserved space, preferring to use large pages.
  ReservedSpace rs(size, page_size);
  G1RegionToSpaceMapper* mapper =
    G1RegionToSpaceMapper::create_mapper(rs,
                                         size,
                                         rs.alignment(),
                                         HeapRegion::GrainBytes,
                                         translation_factor,
                                         mtGC);
  if (TracePageSizes) {
    gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
                           description, page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
  }
  return mapper;
}
1929
1930
// One-time heap initialization: reserves the heap and all auxiliary data
// structures, wires up the barrier/remembered sets, creates the
// concurrent-mark machinery, and commits the initial heap size.
// Returns JNI_OK on success, JNI_ENOMEM on any failure.
jint G1CollectedHeap::initialize() {
  CollectedHeap::pre_initialize();
  os::enable_vtime();

  G1Log::init();

  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);

  // We have to initialize the printer before committing the heap, as
  // it will be used then.
  _hr_printer.set_active(G1PrintHeapRegions);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");

  _refine_cte_cl = new RefineCardTableEntryClosure();

  _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);

  // Reserve the maximum.

  // When compressed oops are enabled, the preferred heap base
  // is calculated by subtracting the requested size from the
  // 32Gb boundary and using the result as the base address for
  // heap reservation. If the requested size is not aligned to
  // HeapRegion::GrainBytes (i.e. the alignment that is passed
  // into the ReservedHeapSpace constructor) then the actual
  // base of the reserved heap may end up differing from the
  // address that was requested (i.e. the preferred heap base).
  // If this happens then we could end up using a non-optimal
  // compressed oops mode.

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (I've actually seen this
  // happen in asserts: DLD.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

  // Create the gen rem set (and barrier set) for the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  set_barrier_set(rem_set()->bs());
  if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
    vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
    return JNI_ENOMEM;
  }

  // Also create a G1 rem set.
  _g1_rem_set = new G1RemSet(this, g1_barrier_set());

  // Carve out the G1 part of the heap.

  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  G1RegionToSpaceMapper* heap_storage =
    G1RegionToSpaceMapper::create_mapper(g1_rs,
                                         g1_rs.size(),
                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
                                         HeapRegion::GrainBytes,
                                         1,
                                         mtJavaHeap);
  heap_storage->set_mapping_changed_listener(&_listener);

  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
  G1RegionToSpaceMapper* bot_storage =
    create_aux_memory_mapper("Block offset table",
                             G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
                             G1BlockOffsetSharedArray::N_bytes);

  // NOTE(review): the card table and card counts mappers below reuse the
  // BOT's sizing/translation constants — presumably one byte per
  // N_bytes-sized card matches all three structures; confirm against
  // G1SATBCardTableLoggingModRefBS and G1CardCounts.
  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
  G1RegionToSpaceMapper* cardtable_storage =
    create_aux_memory_mapper("Card table",
                             G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
                             G1BlockOffsetSharedArray::N_bytes);

  G1RegionToSpaceMapper* card_counts_storage =
    create_aux_memory_mapper("Card counts table",
                             G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
                             G1BlockOffsetSharedArray::N_bytes);

  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
  G1RegionToSpaceMapper* prev_bitmap_storage =
    create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
  G1RegionToSpaceMapper* next_bitmap_storage =
    create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());

  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  g1_barrier_set()->initialize(cardtable_storage);
  // Do later initialization work for concurrent refinement.
  _cg1r->init(card_counts_storage);

  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");

  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
            "too many cards per region");

  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);

  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);

  _g1h = this;

  {
    HeapWord* start = _hrm.reserved().start();
    HeapWord* end = _hrm.reserved().end();
    size_t granularity = HeapRegion::GrainBytes;

    // Both bias-mapped tables cover the whole reserved heap at region
    // granularity.
    _in_cset_fast_test.initialize(start, end, granularity);
    _humongous_reclaim_candidates.initialize(start, end, granularity);
  }

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
  if (_cm == NULL || !_cm->completed_initialization()) {
    vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
    return JNI_ENOMEM;
  }
  _cmThread = _cm->cmThread();

  // Initialize the from_card cache structure of HeapRegionRemSet.
  HeapRegionRemSet::init_heap(max_regions());

  // Now expand into the initial heap size.
  if (!expand(init_byte_size)) {
    vm_shutdown_during_initialization("Failed to allocate initial heap.");
    return JNI_ENOMEM;
  }

  // Perform any initialization actions delegated to the policy.
  g1_policy()->init();

  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               G1SATBProcessCompletedThreshold,
                                               Shared_SATB_Q_lock);

  JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
                                                DirtyCardQ_CBL_mon,
                                                DirtyCardQ_FL_lock,
                                                concurrent_g1_refine()->yellow_zone(),
                                                concurrent_g1_refine()->red_zone(),
                                                Shared_DirtyCardQ_lock);

  dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
                                    DirtyCardQ_CBL_mon,
                                    DirtyCardQ_FL_lock,
                                    -1, // never trigger processing
                                    -1, // no limit on length
                                    Shared_DirtyCardQ_lock,
                                    &JavaThread::dirty_card_queue_set());

  // Initialize the card queue set used to hold cards containing
  // references into the collection set.
  _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
                                             DirtyCardQ_CBL_mon,
                                             DirtyCardQ_FL_lock,
                                             -1, // never trigger processing
                                             -1, // no limit on length
                                             Shared_DirtyCardQ_lock,
                                             &JavaThread::dirty_card_queue_set());

  // In case we're keeping closure specialization stats, initialize those
  // counts and that mechanism.
  SpecializationStats::clear();

  // Here we allocate the dummy HeapRegion that is required by the
  // G1AllocRegion class.
  HeapRegion* dummy_region = _hrm.get_dummy_region();

  // We'll re-use the same region whether the alloc region will
  // require BOT updates or not and, if it doesn't, then a non-young
  // region will complain that it cannot support allocations without
  // BOT updates. So we'll tag the dummy region as eden to avoid that.
  dummy_region->set_eden();
  // Make sure it's full.
  dummy_region->set_top(dummy_region->end());
  G1AllocRegion::setup(this, dummy_region);

  _allocator->init_mutator_alloc_region();

  // Do create of the monitoring and management support so that
  // values in the heap have been properly initialized.
  _g1mm = new G1MonitoringSupport(this);

  G1StringDedup::initialize();

  return JNI_OK;
}
2141
2142
// Shutdown hook for the heap's service threads.
void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. gclog_or_tty)
  // that are destroyed during shutdown.
  _cg1r->stop();
  _cmThread->stop();
  // String-dedup threads exist only when the feature is enabled.
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::stop();
  }
}
2152
2153
// The heap must be alignable to the largest possible region size.
size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}
2156
2157
// Creates the two reference processors G1 uses (see the overview below).
void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There are two reference processor instances. One is
  //   used to record and process discovered references
  //   during concurrent marking; the other is used to
  //   record and process references during STW pauses
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
  //   * Reference discovery is enabled at initial marking.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc during remarking.
  //   * Reference discovery is MT (see below).
  //   * Reference discovery requires a barrier (see below).
  //   * Reference processing may or may not be MT
  //     (depending on the value of ParallelRefProcEnabled
  //     and ParallelGCThreads).
  //   * A full GC disables reference discovery by the CM
  //     ref processor and abandons any entries on it's
  //     discovered lists.
  //
  // * For the STW processor:
  //   * Non MT discovery is enabled at the start of a full GC.
  //   * Processing and enqueueing during a full GC is non-MT.
  //   * During a full GC, references are processed after marking.
  //
  //   * Discovery (may or may not be MT) is enabled at the start
  //     of an incremental evacuation pause.
  //   * References are processed near the end of a STW evacuation pause.
  //   * For both types of GC:
  //     * Discovery is atomic - i.e. not concurrent.
  //     * Reference discovery will not need a barrier.

  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();

  // Concurrent Mark ref processor
  _ref_processor_cm =
    new ReferenceProcessor(mr,    // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                                // mt processing
                           (int) ParallelGCThreads,
                                                // degree of mt processing
                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                                // mt discovery
                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
                                                // degree of mt discovery
                           false,
                                                // Reference discovery is not atomic
                           &_is_alive_closure_cm);
                                                // is alive closure
                                                // (for efficiency/performance)

  // STW ref processor
  _ref_processor_stw =
    new ReferenceProcessor(mr,    // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                                // mt processing
                           MAX2((int)ParallelGCThreads, 1),
                                                // degree of mt processing
                           (ParallelGCThreads > 1),
                                                // mt discovery
                           MAX2((int)ParallelGCThreads, 1),
                                                // degree of mt discovery
                           true,
                                                // Reference discovery is atomic
                           &_is_alive_closure_stw);
                                                // is alive closure
                                                // (for efficiency/performance)
}
2230
2231
// Heap capacity in bytes: region count (per the region manager) times
// the fixed region size.
size_t G1CollectedHeap::capacity() const {
  return _hrm.length() * HeapRegion::GrainBytes;
}
2234
2235
// Clears the GC time stamp of "hr" and, if "hr" starts a humongous
// object, of every continues-humongous region that follows it.
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  assert(!hr->continuesHumongous(), "pre-condition");
  hr->reset_gc_time_stamp();
  if (!hr->startsHumongous()) {
    return;
  }
  // Walk the continuation regions of the humongous object.
  uint last_index = hr->last_hc_index();
  for (uint idx = hr->hrm_index() + 1; idx < last_index; ++idx) {
    HeapRegion* continuation = region_at(idx);
    assert(continuation->continuesHumongous(), "sanity");
    continuation->reset_gc_time_stamp();
  }
}
2248
2249
#ifndef PRODUCT
2250
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2251
private:
2252
unsigned _gc_time_stamp;
2253
bool _failures;
2254
2255
public:
2256
CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2257
_gc_time_stamp(gc_time_stamp), _failures(false) { }
2258
2259
virtual bool doHeapRegion(HeapRegion* hr) {
2260
unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2261
if (_gc_time_stamp != region_gc_time_stamp) {
2262
gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2263
"expected %d", HR_FORMAT_PARAMS(hr),
2264
region_gc_time_stamp, _gc_time_stamp);
2265
_failures = true;
2266
}
2267
return false;
2268
}
2269
2270
bool failures() { return _failures; }
2271
};
2272
2273
// Debug-only verification that every region's GC time stamp matches the
// heap's current stamp (i.e. that all stamps have been reset).
void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}
2278
#endif // PRODUCT
2279
2280
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2281
DirtyCardQueue* into_cset_dcq,
2282
bool concurrent,
2283
uint worker_i) {
2284
// Clean cards in the hot card cache
2285
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2286
hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2287
2288
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2289
size_t n_completed_buffers = 0;
2290
while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2291
n_completed_buffers++;
2292
}
2293
g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2294
dcqs.clear_n_completed_buffers();
2295
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2296
}
2297
2298
2299
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
  // Delegates to the allocator's book-keeping of used bytes.
  return _allocator->used();
}
2303
2304
// Variant of used() — presumably callable without holding the Heap_lock
// (delegates to the allocator's unlocked accessor; see G1Allocator).
size_t G1CollectedHeap::used_unlocked() const {
  return _allocator->used_unlocked();
}
2307
2308
class SumUsedClosure: public HeapRegionClosure {
2309
size_t _used;
2310
public:
2311
SumUsedClosure() : _used(0) {}
2312
bool doHeapRegion(HeapRegion* r) {
2313
if (!r->continuesHumongous()) {
2314
_used += r->used();
2315
}
2316
return false;
2317
}
2318
size_t result() { return _used; }
2319
};
2320
2321
// Recomputes the used-bytes total from scratch by walking every region,
// timing the walk for evacuation-failure phase accounting.
size_t G1CollectedHeap::recalculate_used() const {
  double start_sec = os::elapsedTime();

  SumUsedClosure closure;
  heap_region_iterate(&closure);

  double elapsed_ms = (os::elapsedTime() - start_sec) * 1000.0;
  g1_policy()->phase_times()->record_evac_fail_recalc_used_time(elapsed_ms);
  return closure.result();
}
2330
2331
// Decides whether the given cause should start a concurrent cycle (via an
// initial-mark pause) rather than be handled as a regular pause/Full GC.
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (cause == GCCause::_gc_locker) {
    return GCLockerInvokesConcurrent;
  }
  if (cause == GCCause::_java_lang_system_gc) {
    return ExplicitGCInvokesConcurrent;
  }
  // These causes always start a concurrent cycle.
  if (cause == GCCause::_g1_humongous_allocation ||
      cause == GCCause::_update_allocation_context_stats_inc ||
      cause == GCCause::_wb_conc_mark) {
    return true;
  }
  return false;
}
2341
2342
#ifndef PRODUCT
2343
void G1CollectedHeap::allocate_dummy_regions() {
2344
// Let's fill up most of the region
2345
size_t word_size = HeapRegion::GrainWords - 1024;
2346
// And as a result the region we'll allocate will be humongous.
2347
guarantee(isHumongous(word_size), "sanity");
2348
2349
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2350
// Let's use the existing mechanism for the allocation
2351
HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2352
AllocationContext::system());
2353
if (dummy_obj != NULL) {
2354
MemRegion mr(dummy_obj, word_size);
2355
CollectedHeap::fill_with_object(mr);
2356
} else {
2357
// If we can't allocate once, we probably cannot allocate
2358
// again. Let's get out of the loop.
2359
break;
2360
}
2361
}
2362
}
2363
#endif // !PRODUCT
2364
2365
// Bumps the count of started old-marking cycles. At most one cycle can be
// in flight, so "started" may exceed "completed" by at most one.
void G1CollectedHeap::increment_old_marking_cycles_started() {
  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
         err_msg("Wrong marking cycle count (started: %d, completed: %d)",
                 _old_marking_cycles_started, _old_marking_cycles_completed));

  _old_marking_cycles_started++;
}
2373
2374
// Called at the end of a Full GC (concurrent == false) or of a concurrent
// marking cycle (concurrent == true). Updates the completed-cycle count
// and wakes up any threads waiting on FullGCCount_lock.
void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We assume that if concurrent == true, then the caller is a
  // concurrent thread that was joined the Suspendible Thread
  // Set. If there's ever a cheap way to check this, we should add an
  // assert here.

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of full collections
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of full collections started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
         err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
                 "is inconsistent with _old_marking_cycles_completed = %u",
                 _old_marking_cycles_started, _old_marking_cycles_completed));

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
         err_msg("for outer caller (concurrent cycle): "
                 "_old_marking_cycles_started = %u "
                 "is inconsistent with _old_marking_cycles_completed = %u",
                 _old_marking_cycles_started, _old_marking_cycles_completed));

  _old_marking_cycles_completed += 1;

  // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitInvokesConcurrent
  // is set) so that if a waiter requests another System.gc() it doesn't
  // incorrectly see that a marking cycle is still in progress.
  if (concurrent) {
    _cmThread->set_idle();
  }

  // This notify_all() will ensure that a thread that called
  // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
  // and it's waiting for a full GC to finish will be woken up. It is
  // waiting in VM_G1IncCollectionPause::doit_epilogue().
  FullGCCount_lock->notify_all();
}
2421
2422
// Records the start of a concurrent marking cycle: starts the concurrent
// GC timer and reports the cycle start to the tracing framework.
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
  _concurrent_cycle_started = true;
  _gc_timer_cm->register_gc_start(start_time);

  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
  trace_heap_before_gc(_gc_tracer_cm);
}
2429
2430
void G1CollectedHeap::register_concurrent_cycle_end() {
2431
if (_concurrent_cycle_started) {
2432
if (_cm->has_aborted()) {
2433
_gc_tracer_cm->report_concurrent_mode_failure();
2434
}
2435
2436
_gc_timer_cm->register_gc_end();
2437
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2438
2439
// Clear state variables to prepare for the next concurrent cycle.
2440
_concurrent_cycle_started = false;
2441
_heap_summary_sent = false;
2442
}
2443
}
2444
2445
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2446
if (_concurrent_cycle_started) {
2447
// This function can be called when:
2448
// the cleanup pause is run
2449
// the concurrent cycle is aborted before the cleanup pause.
2450
// the concurrent cycle is aborted after the cleanup pause,
2451
// but before the concurrent cycle end has been registered.
2452
// Make sure that we only send the heap information once.
2453
if (!_heap_summary_sent) {
2454
trace_heap_after_gc(_gc_tracer_cm);
2455
_heap_summary_sent = true;
2456
}
2457
}
2458
}
2459
2460
// Classifies the current young collection for tracing: initial-mark takes
// precedence, then an in-progress mark, then the young/mixed distinction
// reported by the policy.
G1YCType G1CollectedHeap::yc_type() {
  if (g1_policy()->during_initial_mark_pause()) {
    return InitialMark;
  }
  if (mark_in_progress()) {
    return DuringMark;
  }
  return g1_policy()->gcs_are_young() ? Normal : Mixed;
}
2475
2476
// Entry point for an externally requested collection (System.gc(),
// GC locker, whitebox, ...). Depending on the cause this schedules an
// initial-mark pause, a standard evacuation pause, or a Full GC, and
// retries when the chosen pause could not be run.
void G1CollectedHeap::collect(GCCause::Cause cause) {
  assert_heap_not_locked();

  uint gc_count_before;
  uint old_marking_count_before;
  uint full_gc_count_before;
  bool retry_gc;

  do {
    retry_gc = false;

    {
      MutexLocker ml(Heap_lock);

      // Read the GC count while holding the Heap_lock
      gc_count_before = total_collections();
      full_gc_count_before = total_full_collections();
      old_marking_count_before = _old_marking_cycles_started;
    }

    if (should_do_concurrent_full_gc(cause)) {
      // Schedule an initial-mark evacuation pause that will start a
      // concurrent cycle. We're setting word_size to 0 which means that
      // we are not requesting a post-GC allocation.
      VM_G1IncCollectionPause op(gc_count_before,
                                 0, /* word_size */
                                 true, /* should_initiate_conc_mark */
                                 g1_policy()->max_pause_time_ms(),
                                 cause);
      op.set_allocation_context(AllocationContext::current());

      VMThread::execute(&op);
      if (!op.pause_succeeded()) {
        if (old_marking_count_before == _old_marking_cycles_started) {
          retry_gc = op.should_retry_gc();
        } else {
          // A Full GC happened while we were trying to schedule the
          // initial-mark GC. No point in starting a new cycle given
          // that the whole heap was collected anyway.
        }

        if (retry_gc) {
          // Wait for the GC locker to clear before retrying the pause.
          if (GC_locker::is_active_and_needs_gc()) {
            GC_locker::stall_until_clear();
          }
        }
      }
    } else if (GC_locker::should_discard(cause, gc_count_before)) {
      // Return to be consistent with VMOp failure due to another
      // collection slipping in after our gc_count but before our
      // request is processed. _gc_locker collections upgraded by
      // GCLockerInvokesConcurrent are handled above and never discarded.
      return;
    } else {
      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

        // Schedule a standard evacuation pause. We're setting word_size
        // to 0 which means that we are not requesting a post-GC allocation.
        VM_G1IncCollectionPause op(gc_count_before,
                                   0, /* word_size */
                                   false, /* should_initiate_conc_mark */
                                   g1_policy()->max_pause_time_ms(),
                                   cause);
        VMThread::execute(&op);
      } else {
        // Schedule a Full GC.
        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
        VMThread::execute(&op);
      }
    }
  } while (retry_gc);
}
2549
2550
// Returns true if p lies within an allocated part of the heap.
// First filters on the reserved range, then asks the containing region
// (which knows its current top) whether p is actually in use.
bool G1CollectedHeap::is_in(const void* p) const {
  if (_hrm.reserved().contains(p)) {
    // Given that we know that p is in the reserved space,
    // heap_region_containing_raw() should successfully
    // return the containing region.
    HeapRegion* hr = heap_region_containing_raw(p);
    return hr->is_in(p);
  } else {
    return false;
  }
}
2561
2562
#ifdef ASSERT
// Debug-only exact variant of is_in(): p must lie inside the reserved
// region AND the region that would contain it must be committed
// ("available") in the heap region manager.
bool G1CollectedHeap::is_in_exact(const void* p) const {
  const bool contains  = reserved_region().contains(p);
  const bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
  return contains && available;
}
#endif
2573
2574
// Iteration functions.
2575
2576
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2577
2578
// Adapts an ExtendedOopClosure to a per-region closure: applies the oop
// closure to every object reference in each region. "Continues
// humongous" regions are skipped because their objects are iterated
// from the "starts humongous" region.
class IterateOopClosureRegionClosure: public HeapRegionClosure {
  ExtendedOopClosure* _cl;
public:
  IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->oop_iterate(_cl);
    }
    return false;  // false => keep iterating over all regions
  }
};
2589
2590
// Applies cl to every object reference in the heap, region by region.
void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  IterateOopClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}
2594
2595
// Iterates an ObjectClosure over all objects within a HeapRegion.
2596
2597
// Adapts an ObjectClosure to a per-region closure: applies the object
// closure to every object in each region, skipping "continues
// humongous" regions (their object belongs to the starts-humongous
// region).
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  ObjectClosure* _cl;
public:
  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    if (! r->continuesHumongous()) {
      r->object_iterate(_cl);
    }
    return false;  // false => keep iterating over all regions
  }
};
2608
2609
// Applies cl to every object in the heap, region by region.
void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}
2613
2614
// Calls a SpaceClosure on a HeapRegion.
2615
2616
// Adapts a SpaceClosure to a per-region closure; every HeapRegion is a
// Space, so each region is handed to the closure directly.
class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    _cl->do_space(r);
    return false;  // false => keep iterating over all regions
  }
};
2625
2626
// Applies cl to every region, viewed as a Space.
void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}
2630
2631
// Serial iteration over all committed heap regions, delegated to the
// heap region manager.
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrm.iterate(cl);
}
2634
2635
// Parallel region iteration: worker worker_id (of num_workers) claims
// chunks of the region array using claim_value so that no region is
// visited by more than one worker.
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 uint worker_id,
                                                 uint num_workers,
                                                 jint claim_value) const {
  _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
}
2642
2643
// Resets each region's claim value back to InitialClaimValue so a
// subsequent parallel iteration can claim regions afresh.
class ResetClaimValuesClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->set_claim_value(HeapRegion::InitialClaimValue);
    return false;  // false => keep iterating over all regions
  }
};
2650
2651
// Resets the claim value of every region in the heap.
void G1CollectedHeap::reset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  heap_region_iterate(&blk);
}
2655
2656
// Resets the claim value of every region in the collection set only.
void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  collection_set_iterate(&blk);
}
2660
2661
#ifdef ASSERT
2662
// This checks whether all regions in the heap have the correct claim
2663
// value. I also piggy-backed on this a check to ensure that the
2664
// humongous_start_region() information on "continues humongous"
2665
// regions is correct.
2666
2667
// Debug closure: verifies every region carries the expected claim
// value, and piggy-backs a check that each "continues humongous"
// region points back at the most recently seen "starts humongous"
// region. Failures are logged and counted rather than aborting.
class CheckClaimValuesClosure : public HeapRegionClosure {
private:
  jint _claim_value;
  uint _failures;
  HeapRegion* _sh_region;  // last "starts humongous" region seen

public:
  CheckClaimValuesClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", "
                             "claim value = %d, should be %d",
                             HR_FORMAT_PARAMS(r),
                             r->claim_value(), _claim_value);
      ++_failures;
    }
    // Track humongous chains: regions are visited in address order, so
    // a continues-humongous region must follow its starts-humongous one.
    if (!r->isHumongous()) {
      _sh_region = NULL;
    } else if (r->startsHumongous()) {
      _sh_region = r;
    } else if (r->continuesHumongous()) {
      if (r->humongous_start_region() != _sh_region) {
        gclog_or_tty->print_cr("Region " HR_FORMAT ", "
                               "HS = " PTR_FORMAT ", should be " PTR_FORMAT,
                               HR_FORMAT_PARAMS(r),
                               r->humongous_start_region(),
                               _sh_region);
        ++_failures;
      }
    }
    return false;  // false => keep iterating to report all failures
  }
  uint failures() { return _failures; }
};
2702
2703
// Returns true iff every region in the heap has the given claim value
// (debug-only sanity check for parallel iteration).
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesClosure cl(claim_value);
  heap_region_iterate(&cl);
  return cl.failures() == 0;
}
2708
2709
// Debug closure: verifies every collection-set region carries the
// expected claim value. CSet regions must not be humongous.
class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
private:
  jint _claim_value;
  uint _failures;

public:
  CheckClaimValuesInCSetHRClosure(jint claim_value) :
    _claim_value(claim_value), _failures(0) { }

  uint failures() { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    assert(hr->in_collection_set(), "how?");
    assert(!hr->isHumongous(), "H-region in CSet");
    if (hr->claim_value() != _claim_value) {
      gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
                             "claim value = %d, should be %d",
                             HR_FORMAT_PARAMS(hr),
                             hr->claim_value(), _claim_value);
      _failures += 1;
    }
    return false;  // false => keep iterating to report all failures
  }
};
2733
2734
// Returns true iff every collection-set region has the given claim
// value (debug-only sanity check).
bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
  CheckClaimValuesInCSetHRClosure cl(claim_value);
  collection_set_iterate(&cl);
  return cl.failures() == 0;
}
2739
#endif // ASSERT
2740
2741
// Clear the cached CSet starting regions and (more importantly)
2742
// the time stamps. Called when we reset the GC time stamp.
2743
// Clears each worker's cached CSet starting region and (more
// importantly) its time stamp, invalidating the per-worker cache used
// by start_cset_region_for_worker(). Called when the GC time stamp is
// reset.
void G1CollectedHeap::clear_cset_start_regions() {
  assert(_worker_cset_start_region != NULL, "sanity");
  assert(_worker_cset_start_region_time_stamp != NULL, "sanity");

  // At least one slot even when running with zero parallel GC threads.
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  for (int i = 0; i < n_queues; i++) {
    _worker_cset_start_region[i] = NULL;
    _worker_cset_start_region_time_stamp[i] = 0;
  }
}
2753
2754
// Given the id of a worker, obtain or calculate a suitable
2755
// starting region for iterating over the current collection set.
2756
// Returns (computing and caching if needed) the collection-set region
// at which worker worker_i should begin its CSet iteration. The cache
// is validated by comparing its per-worker time stamp with the current
// GC time stamp.
HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
  assert(get_gc_time_stamp() > 0, "should have been updated by now");

  HeapRegion* result = NULL;
  unsigned gc_time_stamp = get_gc_time_stamp();

  if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
    // Cached starting region for current worker was set
    // during the current pause - so it's valid.
    // Note: the cached starting heap region may be NULL
    // (when the collection set is empty).
    result = _worker_cset_start_region[worker_i];
    assert(result == NULL || result->in_collection_set(), "sanity");
    return result;
  }

  // The cached entry was not valid so let's calculate
  // a suitable starting heap region for this worker.

  // We want the parallel threads to start their collection
  // set iteration at different collection set regions to
  // avoid contention.
  // If we have:
  //   n collection set regions
  //   p threads
  // Then thread t will start at region floor ((t * n) / p)

  result = g1_policy()->collection_set();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint cs_size = g1_policy()->cset_region_length();
    uint active_workers = workers()->active_workers();
    assert(UseDynamicNumberOfGCThreads ||
           active_workers == workers()->total_workers(),
           "Unless dynamic should use total workers");

    uint end_ind = (cs_size * worker_i) / active_workers;
    uint start_ind = 0;

    if (worker_i > 0 &&
        _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
      // Previous worker's starting region is valid, so shortcut by
      // iterating from there instead of from the CSet head.
      start_ind = (cs_size * (worker_i - 1)) / active_workers;
      // loadload pairs with the storestore below: if we saw the fresh
      // time stamp we must also see the fresh cached region.
      OrderAccess::loadload();
      result = _worker_cset_start_region[worker_i - 1];
    }

    for (uint i = start_ind; i < end_ind; i++) {
      result = result->next_in_collection_set();
    }
  }

  // Note: the calculated starting heap region may be NULL
  // (when the collection set is empty).
  assert(result == NULL || result->in_collection_set(), "sanity");
  assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
         "should be updated only once per pause");
  _worker_cset_start_region[worker_i] = result;
  // Publish the region before the time stamp that marks it valid, so a
  // racing reader (the loadload path above) never sees a stale region
  // with a fresh time stamp.
  OrderAccess::storestore();
  _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
  return result;
}
2818
2819
// Walk every region in the collection set, in linked-list order.
// The successor is fetched before invoking the closure, so the closure
// may unlink the region it is handed. If the closure asks to stop
// (doHeapRegion() returns true), mark the traversal incomplete and
// bail out.
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  for (HeapRegion* cur = g1_policy()->collection_set(); cur != NULL; ) {
    HeapRegion* next = cur->next_in_collection_set();
    if (cl->doHeapRegion(cur)) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
}
2830
2831
// Iterates over the whole collection set starting at region r, wrapping
// around: first r..end of list, then head of list..r (exclusive).
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                  HeapRegionClosure *cl) {
  if (r == NULL) {
    // The CSet is empty so there's nothing to do.
    return;
  }

  assert(r->in_collection_set(),
         "Start region must be a member of the collection set.");
  HeapRegion* cur = r;
  while (cur != NULL) {
    HeapRegion* next = cur->next_in_collection_set();
    // NOTE(review): the "&& false" makes the early-out dead code - the
    // closure's result is deliberately ignored here (unlike in
    // collection_set_iterate). Confirm against upstream before changing.
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
  // Wrap around: cover the regions that precede r in the list.
  cur = g1_policy()->collection_set();
  while (cur != r) {
    HeapRegion* next = cur->next_in_collection_set();
    if (cl->doHeapRegion(cur) && false) {
      cl->incomplete();
      return;
    }
    cur = next;
  }
}
2859
2860
// Returns the next region after 'from' (in heap address order) that is
// not humongous, or NULL if none; used by full-GC compaction, which
// does not compact into humongous regions.
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
  HeapRegion* result = _hrm.next_region_in_heap(from);
  while (result != NULL && result->isHumongous()) {
    result = _hrm.next_region_in_heap(result);
  }
  return result;
}
2867
2868
// The Space containing addr is simply its heap region.
Space* G1CollectedHeap::space_containing(const void* addr) const {
  return heap_region_containing(addr);
}
2871
2872
// Returns the start of the block (object) containing addr, delegating
// to the containing region's block-offset table.
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  Space* sp = space_containing(addr);
  return sp->block_start(addr);
}
2876
2877
// Returns the size (in words) of the block starting at addr, delegating
// to the containing region.
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  return sp->block_size(addr);
}
2881
2882
// Returns whether the block starting at addr is a live object,
// delegating to the containing region.
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  return sp->block_is_obj(addr);
}
2886
2887
// G1 always supports thread-local allocation buffers.
bool G1CollectedHeap::supports_tlab_allocation() const {
  return true;
}
2890
2891
// TLAB capacity: the eden portion of the young-gen target (target
// length minus survivor regions), in bytes.
size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
}
2894
2895
// Bytes currently used for TLAB-style allocation == eden usage.
size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
  return young_list()->eden_used_bytes();
}
2898
2899
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be smaller than the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
  // -1 keeps the value strictly below the humongous threshold; then
  // round down to the minimum object alignment.
  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
}
2904
2905
// Largest TLAB (in bytes) that could be allocated without triggering a
// GC, clamped to the humongous-object-safe maximum.
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
  size_t max_tlab = max_tlab_size() * wordSize;
  if (hr == NULL) {
    // No current mutator alloc region - a fresh region could hold a
    // maximally-sized TLAB.
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}
2921
2922
// Maximum heap capacity: the entire reserved (not just committed) range.
size_t G1CollectedHeap::max_capacity() const {
  return _hrm.reserved().byte_size();
}
2925
2926
// Not yet implemented for G1 - always reports 0 ms since the last GC.
jlong G1CollectedHeap::millis_since_last_gc() {
  // assert(false, "NYI");
  return 0;
}
2930
2931
// Makes the heap parsable before verification. Retiring TLABs
// (ensure_parsability) is only safe at a safepoint or when TLABs are
// disabled.
void G1CollectedHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
  g1_rem_set()->prepare_for_verify();
}
2937
2938
// Whether obj was allocated after the marking cycle selected by vo
// started. With the mark-word option there is no TAMS to compare
// against, so the answer is always false.
bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
                                              VerifyOption vo) {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking:
    return hr->obj_allocated_since_prev_marking(obj);
  case VerifyOption_G1UseNextMarking:
    return hr->obj_allocated_since_next_marking(obj);
  case VerifyOption_G1UseMarkWord:
    return false;
  default:
    ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
2952
2953
// The top-at-mark-start (TAMS) pointer of hr for the marking cycle
// selected by vo; NULL for the mark-word option, which has no TAMS.
HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
  case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
  case VerifyOption_G1UseMarkWord:    return NULL;
  default:                            ShouldNotReachHere();
  }
  return NULL; // keep some compilers happy
}
2962
2963
// Whether obj is marked according to the marking information selected
// by vo (prev/next bitmap, or the object's own mark word).
bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
  case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
  case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
2972
2973
// Short label for the TAMS variant selected by vo, used in log output.
const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return "PTAMS";
  case VerifyOption_G1UseNextMarking: return "NTAMS";
  case VerifyOption_G1UseMarkWord:    return "NONE";
  default:                            ShouldNotReachHere();
  }
  return NULL; // keep some compilers happy
}
2982
2983
// Verification closure applied to root locations: reports (and records)
// a failure for any root that points to an object that is dead under
// the chosen verification option.
class VerifyRootsClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRootsClosure(VerifyOption vo) :
    _g1h(G1CollectedHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

  template <class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_g1h->is_obj_dead_cond(obj, _vo)) {
        gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
                               "points to dead obj " PTR_FORMAT, p, (void*) obj);
        if (_vo == VerifyOption_G1UseMarkWord) {
          gclog_or_tty->print_cr(" Mark word: " PTR_FORMAT, (void*)(obj->mark()));
        }
        obj->print_on(gclog_or_tty);
        _failures = true;
      }
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};
3018
3019
// Verification closure for oops embedded in nmethods: first verifies
// liveness via the wrapped root closure, then checks that the current
// nmethod is registered in the strong code roots of the region
// containing the referenced object.
class G1VerifyCodeRootOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _root_cl;
  nmethod* _nm;          // nmethod currently being scanned (set_nmethod)
  VerifyOption _vo;
  bool _failures;

  template <class T> void do_oop_work(T* p) {
    // First verify that this root is live
    _root_cl->do_oop(p);

    if (!G1VerifyHeapRegionCodeRoots) {
      // We're not verifying the code roots attached to heap region.
      return;
    }

    // Don't check the code roots during marking verification in a full GC
    if (_vo == VerifyOption_G1UseMarkWord) {
      return;
    }

    // Now verify that the current nmethod (which contains p) is
    // in the code root list of the heap region containing the
    // object referenced by p.

    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Now fetch the region containing the object
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
        gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
                               "from nmethod " PTR_FORMAT " not in strong "
                               "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                               p, _nm, hr->bottom(), hr->end());
        _failures = true;
      }
    }
  }

public:
  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
    _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_nmethod(nmethod* nm) { _nm = nm; }
  bool failures() { return _failures; }
};
3073
3074
// Drives G1VerifyCodeRootOopClosure over each nmethod in the code
// cache: records the nmethod on the oop closure, then iterates its
// embedded oops. Non-nmethod code blobs are skipped.
class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  G1VerifyCodeRootOopClosure* _oop_cl;

public:
  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
    _oop_cl(oop_cl) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      _oop_cl->set_nmethod(nm);
      nm->oops_do(_oop_cl);
    }
  }
};
3089
3090
// Counts how many of the visited oops point into the young generation.
// Only the uncompressed-oop overload is expected to be called here.
class YoungRefCounterClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  int              _count;
 public:
  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  int count() { return _count; }
  void reset_count() { _count = 0; };
};
3101
3102
// Verification closure over klasses: applies the wrapped oop closure to
// each klass's oops, and additionally checks the card-marking invariant
// that any klass holding young references must be flagged as dirty.
class VerifyKlassClosure: public KlassClosure {
  YoungRefCounterClosure _young_ref_counter_closure;
  OopClosure *_oop_closure;
 public:
  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);

    // Re-scan the klass's oops counting young refs; a klass with young
    // refs that is not marked dirty would be missed by remembered-set
    // scanning.
    _young_ref_counter_closure.reset_count();
    k->oops_do(&_young_ref_counter_closure);
    if (_young_ref_counter_closure.count() > 0) {
      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
    }
  }
};
3117
3118
// Verification closure: guarantees that a live object never references
// a dead object (under the chosen verification option).
class VerifyLivenessOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  VerifyOption _vo;
public:
  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
    _g1h(g1h), _vo(vo)
  { }
  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

  template <class T> void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
              "Dead object referenced by a not dead object");
  }
};
3134
3135
// Verification closure over the objects of one region: checks each live
// object's references (via VerifyLivenessOopClosure) and accumulates
// the live-byte total for cross-checking against the region's
// max_live_bytes accounting.
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  size_t _live_bytes;
  HeapRegion *_hr;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
    : _live_bytes(0), _hr(hr), _vo(vo) {
    _g1h = G1CollectedHeap::heap();
  }
  void do_object(oop o) {
    VerifyLivenessOopClosure isLive(_g1h, _vo);
    assert(o != NULL, "Huh?");
    if (!_g1h->is_obj_dead_cond(o, _vo)) {
      // If the object is alive according to the mark word,
      // then verify that the marking information agrees.
      // Note we can't verify the contra-positive of the
      // above: if the object is dead (according to the mark
      // word), it may not be marked, or may have been marked
      // but has since became dead, or may have been allocated
      // since the last marking.
      if (_vo == VerifyOption_G1UseMarkWord) {
        guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
      }

      o->oop_iterate_no_header(&isLive);
      // Objects allocated since the previous marking are implicitly
      // live and are already included in the region's accounting.
      if (!_hr->obj_allocated_since_prev_marking(o)) {
        size_t obj_size = o->size();    // Make sure we don't overflow
        _live_bytes += (obj_size * HeapWordSize);
      }
    }
  }
  size_t live_bytes() { return _live_bytes; }
};
3173
3174
// Debugging closure: dumps each object in a region - its address, size,
// marking status - followed by a word-by-word hex dump of its contents.
class PrintObjsInRegionClosure : public ObjectClosure {
  HeapRegion *_hr;
  G1CollectedHeap *_g1;
public:
  PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
    _g1 = G1CollectedHeap::heap();
  };

  void do_object(oop o) {
    if (o != NULL) {
      HeapWord *start = (HeapWord *) o;
      size_t word_sz = o->size();
      gclog_or_tty->print("\nPrinting obj " PTR_FORMAT " of size " SIZE_FORMAT
                          " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
                          (void*) o, word_sz,
                          _g1->isMarkedPrev(o),
                          _g1->isMarkedNext(o),
                          _hr->obj_allocated_since_prev_marking(o));
      // Raw dump of the object's words.
      HeapWord *end = start + word_sz;
      HeapWord *cur;
      int *val;
      for (cur = start; cur < end; cur++) {
        val = (int *) cur;
        gclog_or_tty->print("\t " PTR_FORMAT ":" PTR_FORMAT "\n", val, *val);
      }
    }
  }
};
3202
3203
// Per-region verification: runs the region's own verify(), then walks
// its objects cross-checking the live-bytes accounting. Usable both
// serially and from the parallel verification task.
class VerifyRegionClosure: public HeapRegionClosure {
private:
  bool             _par;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyRegionClosure(bool par, VerifyOption vo)
    : _par(par),
      _vo(vo),
      _failures(false) {}

  bool failures() {
    return _failures;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      bool failures = false;
      r->verify(_vo, &failures);
      if (failures) {
        _failures = true;
      } else {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
            gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
                                   "max_live_bytes " SIZE_FORMAT " "
                                   "< calculated " SIZE_FORMAT,
                                   r->bottom(), r->end(),
                                   r->max_live_bytes(),
                                   not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
          // finalized yet.
        }
      }
    }
    // Returning false continues the iteration: we record failures but
    // still visit every region so all problems get reported.
    return false;
  }
};
3250
3251
// This is the task used for parallel verification of the heap regions
3252
3253
// Gang task for parallel heap verification: each worker runs
// VerifyRegionClosure over its chunk of regions, claiming them with
// ParVerifyClaimValue; failures from any worker are OR-ed together.
class G1ParVerifyTask: public AbstractGangTask {
private:
  G1CollectedHeap* _g1h;
  VerifyOption     _vo;
  bool             _failures;

public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
    AbstractGangTask("Parallel verify task"),
    _g1h(g1h),
    _vo(vo),
    _failures(false) { }

  bool failures() {
    return _failures;
  }

  void work(uint worker_id) {
    HandleMark hm;
    VerifyRegionClosure blk(true, _vo);
    _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
                                          _g1h->workers()->active_workers(),
                                          HeapRegion::ParVerifyClaimValue);
    if (blk.failures()) {
      _failures = true;
    }
  }
};
3284
3285
// Full heap verification: roots, region sets, regions (serial or
// parallel), remembered sets and string dedup structures. Must be run
// at a safepoint by the VM thread; otherwise verification is skipped
// (with a log note when not silent). Any failure is fatal via the
// final guarantee().
void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    if (!silent) { gclog_or_tty->print("Roots "); }
    VerifyRootsClosure rootsCl(vo);
    VerifyKlassClosure klassCl(this, &rootsCl);
    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

    // We apply the relevant closures to all the oops in the
    // system dictionary, class loader data graph, the string table
    // and the nmethods in the code cache.
    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

    {
      G1RootProcessor root_processor(this);
      root_processor.process_all_roots(&rootsCl,
                                       &cldCl,
                                       &blobsCl);
    }

    bool failures = rootsCl.failures() || codeRootsCl.failures();

    if (vo != VerifyOption_G1UseMarkWord) {
      // If we're verifying during a full GC then the region sets
      // will have been torn down at the start of the GC. Therefore
      // verifying the region sets will fail. So we only verify
      // the region sets when not in a full GC.
      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
      verify_region_sets();
    }

    if (!silent) { gclog_or_tty->print("HeapRegions "); }
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
             "sanity check");

      G1ParVerifyTask task(this, vo);
      assert(UseDynamicNumberOfGCThreads ||
             workers()->active_workers() == workers()->total_workers(),
             "If not dynamic should be using all the workers");
      int n_workers = workers()->active_workers();
      set_par_threads(n_workers);
      workers()->run_task(&task);
      set_par_threads(0);
      if (task.failures()) {
        failures = true;
      }

      // Checks that the expected amount of parallel work was done.
      // The implication is that n_workers is > 0.
      assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
             "sanity check");

      reset_heap_region_claim_values();

      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
             "sanity check");
    } else {
      VerifyRegionClosure blk(false, vo);
      heap_region_iterate(&blk);
      if (blk.failures()) {
        failures = true;
      }
    }
    if (!silent) gclog_or_tty->print("RemSet ");
    rem_set()->verify();

    if (G1StringDedup::is_enabled()) {
      if (!silent) gclog_or_tty->print("StrDedup ");
      G1StringDedup::verify();
    }

    if (failures) {
      gclog_or_tty->print_cr("Heap:");
      // It helps to have the per-region information in the output to
      // help us track down what went wrong. This is why we call
      // print_extended_on() instead of print_on().
      print_extended_on(gclog_or_tty);
      gclog_or_tty->cr();
#ifndef PRODUCT
      if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
        concurrent_mark()->print_reachable("at-verification-failure",
                                           vo, false /* all */);
      }
#endif
      gclog_or_tty->flush();
    }
    guarantee(!failures, "there should not have been any failures");
  } else {
    if (!silent) {
      gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
      if (G1StringDedup::is_enabled()) {
        gclog_or_tty->print(", StrDedup");
      }
      gclog_or_tty->print(") ");
    }
  }
}
3386
3387
// Convenience overload: verify against the previous marking info.
void G1CollectedHeap::verify(bool silent) {
  verify(silent, VerifyOption_G1UsePrevMarking);
}
3390
3391
// Runs universe verification if 'guard' is set and enough collections
// have occurred (VerifyGCStartAt); returns the time spent verifying in
// milliseconds (0.0 if skipped).
double G1CollectedHeap::verify(bool guard, const char* msg) {
  double verify_time_ms = 0.0;

  if (guard && total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    HandleMark hm;  // Discard invalid handles created during verification
    prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

  return verify_time_ms;
}
3404
3405
// Pre-GC verification (gated by -XX:+VerifyBeforeGC); records the time
// spent in the phase times.
void G1CollectedHeap::verify_before_gc() {
  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
}
3409
3410
// Post-GC verification (gated by -XX:+VerifyAfterGC); records the time
// spent in the phase times.
void G1CollectedHeap::verify_after_gc() {
  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}
3414
3415
// Prints each region's summary line to the given stream; used by
// print_extended_on().
class PrintRegionClosure: public HeapRegionClosure {
  outputStream* _st;
public:
  PrintRegionClosure(outputStream* st) : _st(st) {}
  bool doHeapRegion(HeapRegion* r) {
    r->print_on(_st);
    return false;  // false => keep iterating over all regions
  }
};
3424
3425
// Whether obj (known to be in hr) is dead under the given verification
// option: prev bitmap, next bitmap, or the object's own mark word.
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const HeapRegion* hr,
                                       const VerifyOption vo) const {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
3436
3437
// As above, but looks up the object's region internally.
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const VerifyOption vo) const {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
3447
3448
// Print a compact summary of the heap: capacity/usage, address range,
// region size, and young/survivor region counts, followed by metaspace
// statistics.
void G1CollectedHeap::print_on(outputStream* st) const {
  st->print(" %-20s", "garbage-first heap");
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used_unlocked()/K);
  // The middle address is the end of the committed part of the reserved
  // space: start plus (number of committed regions * region size in words).
  // BUG FIX: the previous code used '+ _hrm.length() + HeapRegion::GrainWords',
  // which advances the pointer by (region count + one region's words) and
  // does not correspond to any meaningful heap address.
  st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
            _hrm.reserved().start(),
            _hrm.reserved().start() + _hrm.length() * HeapRegion::GrainWords,
            _hrm.reserved().end());
  st->cr();
  st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  uint young_regions = _young_list->length();
  st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
            (size_t) young_regions * HeapRegion::GrainBytes / K);
  uint survivor_regions = g1_policy()->recorded_survivor_regions();
  st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
            (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  st->cr();
  MetaspaceAux::print_on(st);
}
3467
3468
// Extended heap printout: the summary from print_on() followed by a
// one-line dump of every heap region (see PrintRegionClosure).
void G1CollectedHeap::print_extended_on(outputStream* st) const {
  print_on(st);

  // Print the per-region information.
  st->cr();
  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
               "HS=humongous(starts), HC=humongous(continues), "
               "CS=collection set, F=free, TS=gc time stamp, "
               "PTAMS=previous top-at-mark-start, "
               "NTAMS=next top-at-mark-start)");
  PrintRegionClosure blk(st);
  heap_region_iterate(&blk);
}
3481
3482
// Crash-time (hs_err) printout: delegate to the shared CollectedHeap
// output, then append concurrent-mark state if marking has been set up.
void G1CollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (_cm != NULL) {
    st->cr();
    _cm->print_on_error(st);
  }
}
3490
3491
// Print every GC-related thread: parallel workers (if enabled), the
// concurrent-mark thread and its workers, the refinement workers, and
// the string-dedup threads when that feature is on.
void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    workers()->print_worker_threads_on(st);
  }
  _cmThread->print_on(st);
  st->cr();
  _cm->print_worker_threads_on(st);
  _cg1r->print_worker_threads_on(st);
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::print_worker_threads_on(st);
  }
}
3503
3504
// Apply 'tc' to each GC thread: parallel workers (if enabled), the
// concurrent-mark thread, refinement threads, and string-dedup threads
// when that feature is on.
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    workers()->threads_do(tc);
  }
  tc->do_thread(_cmThread);
  _cg1r->threads_do(tc);
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::threads_do(tc);
  }
}
3514
3515
// Emit accumulated tracing/statistics output at VM exit, gated on the
// relevant diagnostic flags.
void G1CollectedHeap::print_tracing_info() const {
  // We'll overload this to mean "trace GC pause statistics."
  if (TraceGen0Time || TraceGen1Time) {
    // The "G1CollectorPolicy" is keeping track of these stats, so delegate
    // to that.
    g1_policy()->print_tracing_info();
  }
  if (G1SummarizeRSetStats) {
    g1_rem_set()->print_summary_info();
  }
  if (G1SummarizeConcMark) {
    concurrent_mark()->print_summary_info();
  }
  g1_policy()->print_yg_surv_rate_info();
  SpecializationStats::print();
}
3531
3532
#ifndef PRODUCT
3533
// Helpful for debugging RSet issues.
3534
3535
// Debug-only closure that dumps the remembered set of every visited
// region to the GC log, bracketed by a banner (printed in the
// constructor) and an occupancy total (printed in the destructor).
class PrintRSetsClosure : public HeapRegionClosure {
private:
  const char* _msg;          // banner text, printed once on construction
  size_t _occupied_sum;      // running total of RSet occupancy

public:
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    size_t occupied = hrrs->occupied();
    _occupied_sum += occupied;

    gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
                           HR_FORMAT_PARAMS(r));
    if (occupied == 0) {
      gclog_or_tty->print_cr("  RSet is empty");
    } else {
      hrrs->print();
    }
    gclog_or_tty->print_cr("----------");
    return false; // keep iterating
  }

  PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
    gclog_or_tty->cr();
    gclog_or_tty->print_cr("========================================");
    gclog_or_tty->print_cr("%s", msg);
    gclog_or_tty->cr();
  }

  ~PrintRSetsClosure() {
    gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
    gclog_or_tty->print_cr("========================================");
    gclog_or_tty->cr();
  }
};
3570
3571
// Debug helper: dump the remembered sets of all collection-set regions.
void G1CollectedHeap::print_cset_rsets() {
  PrintRSetsClosure cl("Printing CSet RSets");
  collection_set_iterate(&cl);
}
3575
3576
void G1CollectedHeap::print_all_rsets() {
3577
PrintRSetsClosure cl("Printing All RSets");;
3578
heap_region_iterate(&cl);
3579
}
3580
#endif // PRODUCT
3581
3582
// Build the G1-specific heap summary for GC tracing: eden/survivor usage,
// total heap usage, eden capacity, and the region count.
G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {

  size_t eden_used_bytes = _young_list->eden_used_bytes();
  size_t survivor_used_bytes = _young_list->survivor_used_bytes();
  // Use the lock-free accessor unless we already hold the Heap_lock.
  size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();

  // Eden capacity is the young target minus whatever survivors occupy.
  size_t eden_capacity_bytes =
    (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
                       eden_capacity_bytes, survivor_used_bytes, num_regions());
}
3595
3596
// Report the heap and metaspace summaries to the GC tracer for the given
// point in the collection (before/after GC).
void G1CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  gc_tracer->report_gc_heap_summary(when, create_g1_heap_summary());
  gc_tracer->report_metaspace_summary(when, create_metaspace_summary());
}
3603
3604
// Return the singleton G1 heap, asserting that the installed heap really
// is a G1CollectedHeap.
G1CollectedHeap* G1CollectedHeap::heap() {
  assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
         "not a garbage-first heap");
  return _g1h;
}
3609
3610
// Work done at the start of every GC pause: retire TLABs so the heap is
// parsable, and optionally print a periodic remembered-set summary.
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  // always_do_update_barrier = false;
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  // Fill TLAB's and such
  accumulate_statistics_all_tlabs();
  ensure_parsability(true);

  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
  }
}
3622
3623
// Work done at the end of every GC pause: optional RSet summary, TLAB
// resizing, per-context allocation stats, and refreshing the soft-ref
// policy with the new heap occupancy.
void G1CollectedHeap::gc_epilogue(bool full) {

  if (G1SummarizeRSetStats &&
      (G1SummarizeRSetStatsPeriod > 0) &&
      // we are at the end of the GC. Total collections has already been increased.
      ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
  }

  // FIXME: what is this about?
  // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  // is set.
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                        "derived pointer present"));
  // always_do_update_barrier = true;

  resize_all_tlabs();
  allocation_context_stats().update(full);

  // We have just completed a GC. Update the soft reference
  // policy with the new heap occupancy
  Universe::update_heap_info_at_gc();
}
3646
3647
// Request an incremental collection pause on the VM thread, optionally
// satisfying an allocation of 'word_size' words. Sets *succeeded to
// whether the pause actually ran, and returns the allocation result
// (NULL if none / the pause did not succeed).
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                               uint gc_count_before,
                                               bool* succeeded,
                                               GCCause::Cause gc_cause) {
  assert_heap_not_locked_and_not_at_safepoint();
  g1_policy()->record_stop_world_start();
  VM_G1IncCollectionPause op(gc_count_before,
                             word_size,
                             false, /* should_initiate_conc_mark */
                             g1_policy()->max_pause_time_ms(),
                             gc_cause);

  op.set_allocation_context(AllocationContext::current());
  VMThread::execute(&op);

  HeapWord* result = op.result();
  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  assert(result == NULL || ret_succeeded,
         "the result should be NULL if the VM did not succeed");
  *succeeded = ret_succeeded;

  assert_heap_not_locked();
  return result;
}
3671
3672
// Wake up the concurrent-mark thread (if it is not already running a
// cycle) under the CGC_lock.
void
G1CollectedHeap::doConcurrentMark() {
  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (!_cmThread->in_progress()) {
    _cmThread->set_started();
    CGC_lock->notify();
  }
}
3680
3681
// Estimate the number of pending dirty cards: the contents of every
// Java thread's local dirty-card queue plus all completed buffers in
// the global set, converted from bytes to card entries.
size_t G1CollectedHeap::pending_card_num() {
  size_t extra_cards = 0;
  JavaThread *curr = Threads::first();
  while (curr != NULL) {
    DirtyCardQueue& dcq = curr->dirty_card_queue();
    extra_cards += dcq.size();
    curr = curr->next();
  }
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  size_t buffer_size = dcqs.buffer_size();
  size_t buffer_num = dcqs.completed_buffers_num();

  // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  // in bytes - not the number of 'entries'. We need to convert
  // into a number of cards.
  return (buffer_size * buffer_num + extra_cards) / oopSize;
}
3698
3699
// Number of cards scanned during the last evacuation (delegated to the
// remembered-set machinery).
size_t G1CollectedHeap::cards_scanned() {
  return g1_rem_set()->cardsScanned();
}
3702
3703
// Closure run over all regions before evacuation: counts humongous
// regions, selects eager-reclaim candidates among them, registers the
// candidates with the in-cset fast-test table, and flushes each
// candidate's (small) remembered set into the dirty-card queue so the
// entries are re-examined during the upcoming evacuation.
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
 private:
  size_t _total_humongous;      // all starts-humongous regions seen
  size_t _candidate_humongous;  // subset nominated for eager reclaim

  DirtyCardQueue _dcq;          // receives flushed remembered-set entries

  // We don't nominate objects with many remembered set entries, on
  // the assumption that such objects are likely still live.
  bool is_remset_small(HeapRegion* region) const {
    HeapRegionRemSet* const rset = region->rem_set();
    return G1EagerReclaimHumongousObjectsWithStaleRefs
      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
      : rset->is_empty();
  }

  // True iff the (humongous) object starting at the region's bottom is a
  // primitive (type) array.
  bool is_typeArray_region(HeapRegion* region) const {
    return oop(region->bottom())->is_typeArray();
  }

  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
    assert(region->startsHumongous(), "Must start a humongous object");

    // Candidate selection must satisfy the following constraints
    // while concurrent marking is in progress:
    //
    // * In order to maintain SATB invariants, an object must not be
    // reclaimed if it was allocated before the start of marking and
    // has not had its references scanned.  Such an object must have
    // its references (including type metadata) scanned to ensure no
    // live objects are missed by the marking process.  Objects
    // allocated after the start of concurrent marking don't need to
    // be scanned.
    //
    // * An object must not be reclaimed if it is on the concurrent
    // mark stack.  Objects allocated after the start of concurrent
    // marking are never pushed on the mark stack.
    //
    // Nominating only objects allocated after the start of concurrent
    // marking is sufficient to meet both constraints.  This may miss
    // some objects that satisfy the constraints, but the marking data
    // structures don't support efficiently performing the needed
    // additional tests or scrubbing of the mark stack.
    //
    // However, we presently only nominate is_typeArray() objects.
    // A humongous object containing references induces remembered
    // set entries on other regions.  In order to reclaim such an
    // object, those remembered sets would need to be cleaned up.
    //
    // We also treat is_typeArray() objects specially, allowing them
    // to be reclaimed even if allocated before the start of
    // concurrent mark.  For this we rely on mark stack insertion to
    // exclude is_typeArray() objects, preventing reclaiming an object
    // that is in the mark stack.  We also rely on the metadata for
    // such objects to be built-in and so ensured to be kept live.
    // Frequent allocation and drop of large binary blobs is an
    // important use case for eager reclaim, and this special handling
    // may reduce needed headroom.

    return is_typeArray_region(region) && is_remset_small(region);
  }

 public:
  RegisterHumongousWithInCSetFastTestClosure()
  : _total_humongous(0),
    _candidate_humongous(0),
    _dcq(&JavaThread::dirty_card_queue_set()) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // Only the first region of a humongous object is of interest.
    if (!r->startsHumongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    bool is_candidate = humongous_region_is_candidate(g1h, r);
    uint rindex = r->hrm_index();
    g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
    if (is_candidate) {
      _candidate_humongous++;
      g1h->register_humongous_region_with_in_cset_fast_test(rindex);
      // Is_candidate already filters out humongous object with large remembered sets.
      // If we have a humongous object with a few remembered sets, we simply flush these
      // remembered set entries into the DCQS. That will result in automatic
      // re-evaluation of their remembered set entries during the following evacuation
      // phase.
      if (!r->rem_set()->is_empty()) {
        guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                  "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
        G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
        HeapRegionRemSetIterator hrrs(r->rem_set());
        size_t card_index;
        while (hrrs.has_next(card_index)) {
          jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
          // Only dirty (and enqueue) cards that are not already dirty,
          // so each card is processed at most once.
          if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
            *card_ptr = CardTableModRefBS::dirty_card_val();
            _dcq.enqueue(card_ptr);
          }
        }
        assert(hrrs.n_yielded() == r->rem_set()->occupied(),
               err_msg("Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
                       hrrs.n_yielded(), r->rem_set()->occupied()));
        r->rem_set()->clear_locked();
      }
      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
    }
    _total_humongous++;

    return false;
  }

  size_t total_humongous() const { return _total_humongous; }
  size_t candidate_humongous() const { return _candidate_humongous; }

  // Push any locally-buffered cards to the global dirty-card queue set.
  void flush_rem_set_entries() { _dcq.flush(); }
};
3819
3820
// Scan all regions, nominate humongous eager-reclaim candidates, register
// them with the in-cset fast-test table, and record timing/count stats.
// No-op (besides recording zero stats) if eager reclaim is disabled.
void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
  if (!G1EagerReclaimHumongousObjects) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
    return;
  }
  double time = os::elapsed_counter();

  // Collect reclaim candidate information and register candidates with cset.
  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);

  // Convert elapsed counter ticks to milliseconds.
  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
                                                                  cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  // Finally flush all remembered set entries to re-check into the global DCQS.
  cl.flush_rem_set_entries();
}
3840
3841
// Allocate and zero the per-region array used to accumulate the number of
// surviving words for each region in the young collection set.
void
G1CollectedHeap::setup_surviving_young_words() {
  assert(_surviving_young_words == NULL, "pre-condition");
  uint array_length = g1_policy()->young_cset_region_length();
  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  if (_surviving_young_words == NULL) {
    vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
                          "Not enough space for young surv words summary.");
  }
  memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
#ifdef ASSERT
  for (uint i = 0;  i < array_length; ++i) {
    assert( _surviving_young_words[i] == 0, "memset above" );
  }
#endif // ASSERT
}
3857
3858
// Add a worker's per-region surviving-word counts into the shared array.
// Serialized via ParGCRareEvent_lock since multiple workers may report.
void
G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  uint array_length = g1_policy()->young_cset_region_length();
  for (uint i = 0; i < array_length; ++i) {
    _surviving_young_words[i] += surv_young_words[i];
  }
}
3866
3867
// Release the surviving-young-words array allocated by
// setup_surviving_young_words().
void
G1CollectedHeap::cleanup_surviving_young_words() {
  guarantee( _surviving_young_words != NULL, "pre-condition" );
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
  _surviving_young_words = NULL;
}
3873
3874
class VerifyRegionRemSetClosure : public HeapRegionClosure {
3875
public:
3876
bool doHeapRegion(HeapRegion* hr) {
3877
if (!hr->continuesHumongous()) {
3878
hr->verify_rem_set();
3879
}
3880
return false;
3881
}
3882
};
3883
3884
#ifdef ASSERT
3885
// Debug-only closure that checks each collection-set region's remembered
// set is ready for parallel iteration before evacuation starts.
class VerifyCSetClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* hr) {
    // Here we check that the CSet region's RSet is ready for parallel
    // iteration. The fields that we'll verify are only manipulated
    // when the region is part of a CSet and is collected. Afterwards,
    // we reset these fields when we clear the region's RSet (when the
    // region is freed) so they are ready when the region is
    // re-allocated. The only exception to this is if there's an
    // evacuation failure and instead of freeing the region we leave
    // it in the heap. In that case, we reset these fields during
    // evacuation failure handling.
    guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");

    // Here's a good place to add any other checks we'd like to
    // perform on CSet regions.
    return false; // keep iterating
  }
};
3904
#endif // ASSERT
3905
3906
#if TASKQUEUE_STATS
3907
// Print the two header rows for the per-worker task queue statistics table.
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
3912
3913
// Print per-worker task queue statistics plus an aggregated totals row.
// With no worker gang, a single (serial) queue is reported.
void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  const int n = workers() != NULL ? workers()->total_workers() : 1;
  for (int i = 0; i < n; ++i) {
    st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
    totals += task_queue(i)->stats;
  }
  st->print_raw("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
3926
3927
// Clear the accumulated statistics of every worker's task queue.
void G1CollectedHeap::reset_taskqueue_stats() {
  const int n = workers() != NULL ? workers()->total_workers() : 1;
  for (int i = 0; i < n; ++i) {
    task_queue(i)->stats.reset();
  }
}
3933
#endif // TASKQUEUE_STATS
3934
3935
// Print the opening part of the GC log line for a pause, e.g.
// "[GC pause (young) (initial-mark)". Only active at G1Log::fine().
void G1CollectedHeap::log_gc_header() {
  if (!G1Log::fine()) {
    return;
  }

  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());

  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
    .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");

  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
}
3948
3949
// Print the closing part of the GC log line: pause time, to-space
// exhaustion marker, and heap transition. The finer level also dumps the
// detailed per-phase timing. Only active at G1Log::fine().
void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
  if (!G1Log::fine()) {
    return;
  }

  if (G1Log::finer()) {
    if (evacuation_failed()) {
      gclog_or_tty->print(" (to-space exhausted)");
    }
    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
    g1_policy()->phase_times()->note_gc_end();
    g1_policy()->phase_times()->print(pause_time_sec);
    g1_policy()->print_detailed_heap_transition();
  } else {
    if (evacuation_failed()) {
      // Compact marker for evacuation failure at the plain 'fine' level.
      gclog_or_tty->print("--");
    }
    g1_policy()->print_heap_transition();
    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  }
  gclog_or_tty->flush();
}
3971
3972
bool
3973
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3974
assert_at_safepoint(true /* should_be_vm_thread */);
3975
guarantee(!is_gc_active(), "collection is not reentrant");
3976
3977
if (GC_locker::check_active_before_gc()) {
3978
return false;
3979
}
3980
3981
_gc_timer_stw->register_gc_start();
3982
3983
_gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3984
3985
SvcGCMarker sgcm(SvcGCMarker::MINOR);
3986
ResourceMark rm;
3987
3988
print_heap_before_gc();
3989
trace_heap_before_gc(_gc_tracer_stw);
3990
3991
verify_region_sets_optional();
3992
verify_dirty_young_regions();
3993
3994
// This call will decide whether this pause is an initial-mark
3995
// pause. If it is, during_initial_mark_pause() will return true
3996
// for the duration of this pause.
3997
g1_policy()->decide_on_conc_mark_initiation();
3998
3999
// We do not allow initial-mark to be piggy-backed on a mixed GC.
4000
assert(!g1_policy()->during_initial_mark_pause() ||
4001
g1_policy()->gcs_are_young(), "sanity");
4002
4003
// We also do not allow mixed GCs during marking.
4004
assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
4005
4006
// Record whether this pause is an initial mark. When the current
4007
// thread has completed its logging output and it's safe to signal
4008
// the CM thread, the flag's value in the policy has been reset.
4009
bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
4010
4011
// Inner scope for scope based logging, timers, and stats collection
4012
{
4013
EvacuationInfo evacuation_info;
4014
4015
if (g1_policy()->during_initial_mark_pause()) {
4016
// We are about to start a marking cycle, so we increment the
4017
// full collection counter.
4018
increment_old_marking_cycles_started();
4019
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
4020
}
4021
4022
_gc_tracer_stw->report_yc_type(yc_type());
4023
4024
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
4025
4026
uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
4027
workers()->active_workers(),
4028
Threads::number_of_non_daemon_threads());
4029
assert(UseDynamicNumberOfGCThreads ||
4030
active_workers == workers()->total_workers(),
4031
"If not dynamic should be using all the workers");
4032
workers()->set_active_workers(active_workers);
4033
4034
4035
double pause_start_sec = os::elapsedTime();
4036
g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
4037
log_gc_header();
4038
4039
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
4040
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause(),
4041
yc_type() == Mixed /* allMemoryPoolsAffected */);
4042
4043
// If the secondary_free_list is not empty, append it to the
4044
// free_list. No need to wait for the cleanup operation to finish;
4045
// the region allocation code will check the secondary_free_list
4046
// and wait if necessary. If the G1StressConcRegionFreeing flag is
4047
// set, skip this step so that the region allocation code has to
4048
// get entries from the secondary_free_list.
4049
if (!G1StressConcRegionFreeing) {
4050
append_secondary_free_list_if_not_empty_with_lock();
4051
}
4052
4053
assert(check_young_list_well_formed(), "young list should be well formed");
4054
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
4055
"sanity check");
4056
4057
// Don't dynamically change the number of GC threads this early. A value of
4058
// 0 is used to indicate serial work. When parallel work is done,
4059
// it will be set.
4060
4061
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
4062
IsGCActiveMark x;
4063
4064
gc_prologue(false);
4065
increment_total_collections(false /* full gc */);
4066
increment_gc_time_stamp();
4067
4068
if (VerifyRememberedSets) {
4069
if (!VerifySilently) {
4070
gclog_or_tty->print_cr("[Verifying RemSets before GC]");
4071
}
4072
VerifyRegionRemSetClosure v_cl;
4073
heap_region_iterate(&v_cl);
4074
}
4075
4076
verify_before_gc();
4077
check_bitmaps("GC Start");
4078
4079
COMPILER2_PRESENT(DerivedPointerTable::clear());
4080
4081
// Please see comment in g1CollectedHeap.hpp and
4082
// G1CollectedHeap::ref_processing_init() to see how
4083
// reference processing currently works in G1.
4084
4085
// Enable discovery in the STW reference processor
4086
ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
4087
true /*verify_no_refs*/);
4088
4089
{
4090
// We want to temporarily turn off discovery by the
4091
// CM ref processor, if necessary, and turn it back on
4092
// on again later if we do. Using a scoped
4093
// NoRefDiscovery object will do this.
4094
NoRefDiscovery no_cm_discovery(ref_processor_cm());
4095
4096
// Forget the current alloc region (we might even choose it to be part
4097
// of the collection set!).
4098
_allocator->release_mutator_alloc_region();
4099
4100
// We should call this after we retire the mutator alloc
4101
// region(s) so that all the ALLOC / RETIRE events are generated
4102
// before the start GC event.
4103
_hr_printer.start_gc(false /* full */, (size_t) total_collections());
4104
4105
// This timing is only used by the ergonomics to handle our pause target.
4106
// It is unclear why this should not include the full pause. We will
4107
// investigate this in CR 7178365.
4108
//
4109
// Preserving the old comment here if that helps the investigation:
4110
//
4111
// The elapsed time induced by the start time below deliberately elides
4112
// the possible verification above.
4113
double sample_start_time_sec = os::elapsedTime();
4114
4115
#if YOUNG_LIST_VERBOSE
4116
gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
4117
_young_list->print();
4118
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4119
#endif // YOUNG_LIST_VERBOSE
4120
4121
g1_policy()->record_collection_pause_start(sample_start_time_sec, *_gc_tracer_stw);
4122
4123
double scan_wait_start = os::elapsedTime();
4124
// We have to wait until the CM threads finish scanning the
4125
// root regions as it's the only way to ensure that all the
4126
// objects on them have been correctly scanned before we start
4127
// moving them during the GC.
4128
bool waited = _cm->root_regions()->wait_until_scan_finished();
4129
double wait_time_ms = 0.0;
4130
if (waited) {
4131
double scan_wait_end = os::elapsedTime();
4132
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
4133
}
4134
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
4135
4136
#if YOUNG_LIST_VERBOSE
4137
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
4138
_young_list->print();
4139
#endif // YOUNG_LIST_VERBOSE
4140
4141
if (g1_policy()->during_initial_mark_pause()) {
4142
concurrent_mark()->checkpointRootsInitialPre();
4143
}
4144
4145
#if YOUNG_LIST_VERBOSE
4146
gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
4147
_young_list->print();
4148
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4149
#endif // YOUNG_LIST_VERBOSE
4150
4151
g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
4152
4153
// Make sure the remembered sets are up to date. This needs to be
4154
// done before register_humongous_regions_with_cset(), because the
4155
// remembered sets are used there to choose eager reclaim candidates.
4156
// If the remembered sets are not up to date we might miss some
4157
// entries that need to be handled.
4158
g1_rem_set()->cleanupHRRS();
4159
4160
register_humongous_regions_with_in_cset_fast_test();
4161
4162
assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
4163
4164
_cm->note_start_of_gc();
4165
// We call this after finalize_cset() to
4166
// ensure that the CSet has been finalized.
4167
_cm->verify_no_cset_oops();
4168
4169
if (_hr_printer.is_active()) {
4170
HeapRegion* hr = g1_policy()->collection_set();
4171
while (hr != NULL) {
4172
_hr_printer.cset(hr);
4173
hr = hr->next_in_collection_set();
4174
}
4175
}
4176
4177
#ifdef ASSERT
4178
VerifyCSetClosure cl;
4179
collection_set_iterate(&cl);
4180
#endif // ASSERT
4181
4182
setup_surviving_young_words();
4183
4184
// Initialize the GC alloc regions.
4185
_allocator->init_gc_alloc_regions(evacuation_info);
4186
4187
// Actually do the work...
4188
evacuate_collection_set(evacuation_info);
4189
4190
free_collection_set(g1_policy()->collection_set(), evacuation_info);
4191
4192
eagerly_reclaim_humongous_regions();
4193
4194
g1_policy()->clear_collection_set();
4195
4196
cleanup_surviving_young_words();
4197
4198
// Start a new incremental collection set for the next pause.
4199
g1_policy()->start_incremental_cset_building();
4200
4201
clear_cset_fast_test();
4202
4203
_young_list->reset_sampled_info();
4204
4205
// Don't check the whole heap at this point as the
4206
// GC alloc regions from this pause have been tagged
4207
// as survivors and moved on to the survivor list.
4208
// Survivor regions will fail the !is_young() check.
4209
assert(check_young_list_empty(false /* check_heap */),
4210
"young list should be empty");
4211
4212
#if YOUNG_LIST_VERBOSE
4213
gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4214
_young_list->print();
4215
#endif // YOUNG_LIST_VERBOSE
4216
4217
g1_policy()->record_survivor_regions(_young_list->survivor_length(),
4218
_young_list->first_survivor_region(),
4219
_young_list->last_survivor_region());
4220
4221
_young_list->reset_auxilary_lists();
4222
4223
if (evacuation_failed()) {
4224
_allocator->set_used(recalculate_used());
4225
uint n_queues = MAX2((int)ParallelGCThreads, 1);
4226
for (uint i = 0; i < n_queues; i++) {
4227
if (_evacuation_failed_info_array[i].has_failed()) {
4228
_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
4229
}
4230
}
4231
} else {
4232
// The "used" of the the collection set have already been subtracted
4233
// when they were freed. Add in the bytes evacuated.
4234
_allocator->increase_used(g1_policy()->bytes_copied_during_gc());
4235
}
4236
4237
if (g1_policy()->during_initial_mark_pause()) {
4238
// We have to do this before we notify the CM threads that
4239
// they can start working to make sure that all the
4240
// appropriate initialization is done on the CM object.
4241
concurrent_mark()->checkpointRootsInitialPost();
4242
set_marking_started();
4243
// Note that we don't actually trigger the CM thread at
4244
// this point. We do that later when we're sure that
4245
// the current thread has completed its logging output.
4246
}
4247
4248
allocate_dummy_regions();
4249
4250
#if YOUNG_LIST_VERBOSE
4251
gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
4252
_young_list->print();
4253
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4254
#endif // YOUNG_LIST_VERBOSE
4255
4256
_allocator->init_mutator_alloc_region();
4257
4258
{
4259
size_t expand_bytes = g1_policy()->expansion_amount();
4260
if (expand_bytes > 0) {
4261
size_t bytes_before = capacity();
4262
// No need for an ergo verbose message here,
4263
// expansion_amount() does this when it returns a value > 0.
4264
if (!expand(expand_bytes)) {
4265
// We failed to expand the heap. Cannot do anything about it.
4266
}
4267
}
4268
}
4269
4270
// We redo the verification but now wrt to the new CSet which
4271
// has just got initialized after the previous CSet was freed.
4272
_cm->verify_no_cset_oops();
4273
_cm->note_end_of_gc();
4274
4275
// This timing is only used by the ergonomics to handle our pause target.
4276
// It is unclear why this should not include the full pause. We will
4277
// investigate this in CR 7178365.
4278
double sample_end_time_sec = os::elapsedTime();
4279
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4280
g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
4281
4282
MemoryService::track_memory_usage();
4283
4284
// In prepare_for_verify() below we'll need to scan the deferred
4285
// update buffers to bring the RSets up-to-date if
4286
// G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4287
// the update buffers we'll probably need to scan cards on the
4288
// regions we just allocated to (i.e., the GC alloc
4289
// regions). However, during the last GC we called
4290
// set_saved_mark() on all the GC alloc regions, so card
4291
// scanning might skip the [saved_mark_word()...top()] area of
4292
// those regions (i.e., the area we allocated objects into
4293
// during the last GC). But it shouldn't. Given that
4294
// saved_mark_word() is conditional on whether the GC time stamp
4295
// on the region is current or not, by incrementing the GC time
4296
// stamp here we invalidate all the GC time stamps on all the
4297
// regions and saved_mark_word() will simply return top() for
4298
// all the regions. This is a nicer way of ensuring this rather
4299
// than iterating over the regions and fixing them. In fact, the
4300
// GC time stamp increment here also ensures that
4301
// saved_mark_word() will return top() between pauses, i.e.,
4302
// during concurrent refinement. So we don't need the
4303
// is_gc_active() check to decided which top to use when
4304
// scanning cards (see CR 7039627).
4305
increment_gc_time_stamp();
4306
4307
if (VerifyRememberedSets) {
4308
if (!VerifySilently) {
4309
gclog_or_tty->print_cr("[Verifying RemSets after GC]");
4310
}
4311
VerifyRegionRemSetClosure v_cl;
4312
heap_region_iterate(&v_cl);
4313
}
4314
4315
verify_after_gc();
4316
check_bitmaps("GC End");
4317
4318
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4319
ref_processor_stw()->verify_no_references_recorded();
4320
4321
// CM reference discovery will be re-enabled if necessary.
4322
}
4323
4324
// We should do this after we potentially expand the heap so
4325
// that all the COMMIT events are generated before the end GC
4326
// event, and after we retire the GC alloc regions so that all
4327
// RETIRE events are generated before the end GC event.
4328
_hr_printer.end_gc(false /* full */, (size_t) total_collections());
4329
4330
#ifdef TRACESPINNING
4331
ParallelTaskTerminator::print_termination_counts();
4332
#endif
4333
4334
gc_epilogue(false);
4335
}
4336
4337
// Print the remainder of the GC log output.
4338
log_gc_footer(os::elapsedTime() - pause_start_sec);
4339
4340
// It is not yet to safe to tell the concurrent mark to
4341
// start as we have some optional output below. We don't want the
4342
// output from the concurrent mark thread interfering with this
4343
// logging output either.
4344
4345
_hrm.verify_optional();
4346
verify_region_sets_optional();
4347
4348
TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4349
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4350
4351
print_heap_after_gc();
4352
trace_heap_after_gc(_gc_tracer_stw);
4353
4354
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
4355
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4356
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4357
// before any GC notifications are raised.
4358
g1mm()->update_sizes();
4359
4360
_gc_tracer_stw->report_evacuation_info(&evacuation_info);
4361
_gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4362
_gc_timer_stw->register_gc_end();
4363
_gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4364
}
4365
// It should now be safe to tell the concurrent mark thread to start
4366
// without its logging output interfering with the logging output
4367
// that came from the pause.
4368
4369
if (should_start_conc_mark) {
4370
// CAUTION: after the doConcurrentMark() call below,
4371
// the concurrent marking thread(s) could be running
4372
// concurrently with us. Make sure that anything after
4373
// this point does not assume that we are the only GC thread
4374
// running. Note: of course, the actual marking work will
4375
// not start until the safepoint itself is released in
4376
// SuspendibleThreadSet::desynchronize().
4377
doConcurrentMark();
4378
}
4379
4380
return true;
4381
}
4382
4383
// Set up the evacuation-failure machinery for the upcoming pause:
// install the closure used to re-scan self-forwarded objects and
// allocate the scan stack those objects are deferred onto.
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  _drain_in_progress = false;
  set_evac_failure_closure(cl);
  // C-heap allocated so it survives resource-area resets during the pause.
  _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
}
void G1CollectedHeap::finalize_for_evac_failure() {
4390
assert(_evac_failure_scan_stack != NULL &&
4391
_evac_failure_scan_stack->length() == 0,
4392
"Postcondition");
4393
assert(!_drain_in_progress, "Postcondition");
4394
delete _evac_failure_scan_stack;
4395
_evac_failure_scan_stack = NULL;
4396
}
4397
4398
void G1CollectedHeap::remove_self_forwarding_pointers() {
4399
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4400
4401
double remove_self_forwards_start = os::elapsedTime();
4402
4403
G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4404
4405
if (G1CollectedHeap::use_parallel_gc_threads()) {
4406
set_par_threads();
4407
workers()->run_task(&rsfp_task);
4408
set_par_threads(0);
4409
} else {
4410
rsfp_task.work(0);
4411
}
4412
4413
assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4414
4415
// Reset the claim values in the regions in the collection set.
4416
reset_cset_heap_region_claim_values();
4417
4418
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4419
4420
// Now restore saved marks, if any.
4421
assert(_objs_with_preserved_marks.size() ==
4422
_preserved_marks_of_objs.size(), "Both or none.");
4423
while (!_objs_with_preserved_marks.is_empty()) {
4424
oop obj = _objs_with_preserved_marks.pop();
4425
markOop m = _preserved_marks_of_objs.pop();
4426
obj->set_mark(m);
4427
}
4428
_objs_with_preserved_marks.clear(true);
4429
_preserved_marks_of_objs.clear(true);
4430
4431
g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
4432
}
4433
4434
// Defer scanning of a self-forwarded object; it will be processed
// later by drain_evac_failure_scan_stack().
void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  _evac_failure_scan_stack->push(obj);
}
void G1CollectedHeap::drain_evac_failure_scan_stack() {
4439
assert(_evac_failure_scan_stack != NULL, "precondition");
4440
4441
while (_evac_failure_scan_stack->length() > 0) {
4442
oop obj = _evac_failure_scan_stack->pop();
4443
_evac_failure_closure->set_region(heap_region_containing(obj));
4444
obj->oop_iterate_backwards(_evac_failure_closure);
4445
}
4446
}
4447
4448
// Called by a GC worker when it fails to copy 'old' out of the
// collection set. Attempts to self-forward the object atomically;
// exactly one thread wins the race and runs the failure handling
// (mark preservation, region flagging, deferred re-scan). Returns the
// object's final forwardee (== old on success, or the winner's copy).
// NOTE(review): the parameter shadows naming conventions for members
// (leading underscore) — kept as-is since this is documentation only.
oop
G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
                                               oop old) {
  assert(obj_in_cs(old),
         err_msg("obj: " PTR_FORMAT " should still be in the CSet",
                 (HeapWord*) old));
  // Read the mark BEFORE the atomic self-forward clobbers it.
  markOop m = old->mark();
  oop forward_ptr = old->forward_to_atomic(old);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded.
    assert(_par_scan_state != NULL, "par scan state");
    OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
    uint queue_num = _par_scan_state->queue_num();

    _evacuation_failed = true;
    _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
    if (_evac_failure_closure != cl) {
      // Another thread's closure is (or none is) installed globally:
      // take the lock and install ours for the duration of the drain.
      MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
      assert(!_drain_in_progress,
             "Should only be true while someone holds the lock.");
      // Set the global evac-failure closure to the current thread's.
      assert(_evac_failure_closure == NULL, "Or locking has failed.");
      set_evac_failure_closure(cl);
      // Now do the common part.
      handle_evacuation_failure_common(old, m);
      // Reset to NULL.
      set_evac_failure_closure(NULL);
    } else {
      // The lock is already held, and this is recursive.
      assert(_drain_in_progress, "This should only be the recursive case.");
      handle_evacuation_failure_common(old, m);
    }
    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !obj_in_cs(forward_ptr),
           err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
                   "should not be in the CSet",
                   (HeapWord*) old, (HeapWord*) forward_ptr));
    return forward_ptr;
  }
}
// Common part of evacuation-failure handling; runs after the caller
// has won the self-forwarding race and installed its closure.
void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  // Save the original mark word first if self-forwarding destroyed it.
  preserve_mark_if_necessary(old, m);

  // Flag the region (only once) so the collection-set teardown treats
  // it specially, and report it to the region printer.
  HeapRegion* hr = heap_region_containing(old);
  if (!hr->evacuation_failed()) {
    hr->set_evacuation_failed(true);
    _hr_printer.evac_failure(hr);
  }

  push_on_evac_failure_scan_stack(old);

  if (!_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _drain_in_progress = true;
    drain_evac_failure_scan_stack();
    _drain_in_progress = false;
  }
}
// Save obj's mark word if it carries state that would otherwise be
// lost when the object is self-forwarded during evacuation failure.
void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  assert(evacuation_failed(), "Oversaving!");
  // We want to call the "for_promotion_failure" version only in the
  // case of a promotion failure.
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // Both stacks are pushed in lock-step; see remove_self_forwarding_pointers().
    _objs_with_preserved_marks.push(obj);
    _preserved_marks_of_objs.push(m);
  }
}
// Gray an object that is known not to move (it lies outside the
// collection set) so that concurrent marking will visit it.
void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object stays put, so reading its size here is safe.
  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
}
// Gray the to-space copy of an evacuated object for concurrent marking.
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // Another worker may still be filling in the to-space copy, so its
  // image cannot be trusted; take the size from the stable from-space
  // image instead, which is no longer changing.
  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
}
template <class T>
4545
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4546
if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4547
_scanned_klass->record_modified_oops();
4548
}
4549
}
4550
4551
// The main per-reference copy closure applied during root and RSet
// scanning. For a reference into the collection set, installs (or
// follows) the forwarding pointer; otherwise optionally marks the
// object. 'barrier' and 'do_mark_object' are compile-time parameters,
// so the branches on them fold away per instantiation.
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (oopDesc::is_null(heap_oop)) {
    return;
  }

  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->queue_num(), "sanity");

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark();
    if (m->is_marked()) {
      // Already copied (or self-forwarded); the mark encodes the forwardee.
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it, the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierKlass) {
      do_klass_barrier(p, forwardee);
    }
  } else {
    if (state.is_humongous()) {
      // Keep the referenced humongous object alive for eager reclaim purposes.
      _g1->set_humongous_is_live(obj);
    }
    // The object is not in collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }

  if (barrier == G1BarrierEvac) {
    // Record a possible cross-region reference created by the update.
    _par_scan_state->update_rs(_from, p, _worker_id);
  }
}
// Explicit instantiations of the evacuation-barrier variant of the
// copy closure, for both full-width and compressed oops.
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
class G1ParEvacuateFollowersClosure : public VoidClosure {
4604
protected:
4605
G1CollectedHeap* _g1h;
4606
G1ParScanThreadState* _par_scan_state;
4607
RefToScanQueueSet* _queues;
4608
ParallelTaskTerminator* _terminator;
4609
4610
G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4611
RefToScanQueueSet* queues() { return _queues; }
4612
ParallelTaskTerminator* terminator() { return _terminator; }
4613
4614
public:
4615
G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4616
G1ParScanThreadState* par_scan_state,
4617
RefToScanQueueSet* queues,
4618
ParallelTaskTerminator* terminator)
4619
: _g1h(g1h), _par_scan_state(par_scan_state),
4620
_queues(queues), _terminator(terminator) {}
4621
4622
void do_void();
4623
4624
private:
4625
inline bool offer_termination();
4626
};
4627
4628
bool G1ParEvacuateFollowersClosure::offer_termination() {
4629
G1ParScanThreadState* const pss = par_scan_state();
4630
pss->start_term_time();
4631
const bool res = terminator()->offer_termination();
4632
pss->end_term_time();
4633
return res;
4634
}
4635
4636
void G1ParEvacuateFollowersClosure::do_void() {
4637
G1ParScanThreadState* const pss = par_scan_state();
4638
pss->trim_queue();
4639
do {
4640
pss->steal_and_trim_queue(queues());
4641
} while (!offer_termination());
4642
}
4643
4644
class G1KlassScanClosure : public KlassClosure {
4645
G1ParCopyHelper* _closure;
4646
bool _process_only_dirty;
4647
int _count;
4648
public:
4649
G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4650
: _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4651
void do_klass(Klass* klass) {
4652
// If the klass has not been dirtied we know that there's
4653
// no references into the young gen and we can skip it.
4654
if (!_process_only_dirty || klass->has_modified_oops()) {
4655
// Clean the klass since we're going to scavenge all the metadata.
4656
klass->clear_modified_oops();
4657
4658
// Tell the closure that this klass is the Klass to scavenge
4659
// and is the one to dirty if oops are left pointing into the young gen.
4660
_closure->set_scanned_klass(klass);
4661
4662
klass->oops_do(_closure);
4663
4664
_closure->set_scanned_klass(NULL);
4665
}
4666
_count++;
4667
}
4668
};
4669
4670
// The main gang task for an evacuation pause. Each worker evacuates
// roots and remembered-set entries into its per-thread scan state,
// then drains/steals until global termination. Closure selection
// depends on whether this is an initial-mark pause and on whether
// class unloading with concurrent mark is enabled.
class G1ParTask : public AbstractGangTask {
protected:
  G1CollectedHeap*       _g1h;
  RefToScanQueueSet*     _queues;
  G1RootProcessor*       _root_processor;
  ParallelTaskTerminator _terminator;
  uint                   _n_workers;

  Mutex _stats_lock;
  Mutex* stats_lock() { return &_stats_lock; }

public:
  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _queues(task_queues),
      _root_processor(root_processor),
      _terminator(0, _queues),
      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  {}

  RefToScanQueueSet* queues() { return _queues; }

  RefToScanQueue *work_queue(int i) {
    return queues()->queue(i);
  }

  ParallelTaskTerminator* terminator() { return &_terminator; }

  // Called by the work gang before run; sizes the terminator and the
  // root processor for the actual number of active workers.
  virtual void set_for_termination(int active_workers) {
    _root_processor->set_num_workers(active_workers);
    terminator()->reset_for_reuse(active_workers);
    _n_workers = active_workers;
  }

  // Helps out with CLD processing.
  //
  // During InitialMark we need to:
  // 1) Scavenge all CLDs for the young GC.
  // 2) Mark all objects directly reachable from strong CLDs.
  template <G1Mark do_mark_object>
  class G1CLDClosure : public CLDClosure {
    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
    G1KlassScanClosure                                _klass_in_cld_closure;
    bool                                              _claim;

   public:
    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
                 bool only_young, bool claim)
        : _oop_closure(oop_closure),
          _oop_in_klass_closure(oop_closure->g1(),
                                oop_closure->pss(),
                                oop_closure->rp()),
          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
          _claim(claim) {

    }

    void do_cld(ClassLoaderData* cld) {
      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
    }
  };

  void work(uint worker_id) {
    if (worker_id >= _n_workers) return;  // no work needed this round

    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());

    {
      ResourceMark rm;
      HandleMark   hm;

      ReferenceProcessor*             rp = _g1h->ref_processor_stw();

      G1ParScanThreadState            pss(_g1h, worker_id, rp);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);

      pss.set_evac_failure_closure(&evac_failure_cl);

      bool only_young = _g1h->g1_policy()->gcs_are_young();

      // Non-IM young GC.
      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
                                                                               only_young, // Only process dirty klasses.
                                                                               false);     // No need to claim CLDs.
      // IM young GC.
      //   Strong roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
                                                                               false, // Process all klasses.
                                                                               true); // Need to claim CLDs.
      //   Weak roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                                    false, // Process all klasses.
                                                                                    true); // Need to claim CLDs.

      OopClosure* strong_root_cl;
      OopClosure* weak_root_cl;
      CLDClosure* strong_cld_cl;
      CLDClosure* weak_cld_cl;

      bool trace_metadata = false;

      if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        strong_root_cl = &scan_mark_root_cl;
        strong_cld_cl  = &scan_mark_cld_cl;
        if (ClassUnloadingWithConcurrentMark) {
          weak_root_cl = &scan_mark_weak_root_cl;
          weak_cld_cl  = &scan_mark_weak_cld_cl;
          trace_metadata = true;
        } else {
          weak_root_cl = &scan_mark_root_cl;
          weak_cld_cl  = &scan_mark_cld_cl;
        }
      } else {
        strong_root_cl = &scan_only_root_cl;
        weak_root_cl   = &scan_only_root_cl;
        strong_cld_cl  = &scan_only_cld_cl;
        weak_cld_cl    = &scan_only_cld_cl;
      }

      pss.start_strong_roots();

      _root_processor->evacuate_roots(strong_root_cl,
                                      weak_root_cl,
                                      strong_cld_cl,
                                      weak_cld_cl,
                                      trace_metadata,
                                      worker_id);

      G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
      _root_processor->scan_remembered_sets(&push_heap_rs_cl,
                                            weak_root_cl,
                                            worker_id);
      pss.end_strong_roots();

      {
        // Drain/steal phase; time minus termination wait is ObjCopy.
        double start = os::elapsedTime();
        G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
        evac.do_void();
        double elapsed_sec = os::elapsedTime() - start;
        double term_sec = pss.term_time();
        _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
        _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
        _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
      }
      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
      _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

      if (ParallelGCVerbose) {
        MutexLocker x(stats_lock());
        pss.print_termination_stats(worker_id);
      }

      assert(pss.queue_is_empty(), "should be empty");

      // Close the inner scope so that the ResourceMark and HandleMark
      // destructors are executed here and are included as part of the
      // "GC Worker Time".
    }
    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
  }
};
class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4839
private:
4840
BoolObjectClosure* _is_alive;
4841
int _initial_string_table_size;
4842
int _initial_symbol_table_size;
4843
4844
bool _process_strings;
4845
int _strings_processed;
4846
int _strings_removed;
4847
4848
bool _process_symbols;
4849
int _symbols_processed;
4850
int _symbols_removed;
4851
4852
bool _do_in_parallel;
4853
public:
4854
G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4855
AbstractGangTask("String/Symbol Unlinking"),
4856
_is_alive(is_alive),
4857
_do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
4858
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4859
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4860
4861
_initial_string_table_size = StringTable::the_table()->table_size();
4862
_initial_symbol_table_size = SymbolTable::the_table()->table_size();
4863
if (process_strings) {
4864
StringTable::clear_parallel_claimed_index();
4865
}
4866
if (process_symbols) {
4867
SymbolTable::clear_parallel_claimed_index();
4868
}
4869
}
4870
4871
~G1StringSymbolTableUnlinkTask() {
4872
guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4873
err_msg("claim value " INT32_FORMAT " after unlink less than initial string table size " INT32_FORMAT,
4874
StringTable::parallel_claimed_index(), _initial_string_table_size));
4875
guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4876
err_msg("claim value " INT32_FORMAT " after unlink less than initial symbol table size " INT32_FORMAT,
4877
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4878
4879
if (G1TraceStringSymbolTableScrubbing) {
4880
gclog_or_tty->print_cr("Cleaned string and symbol table, "
4881
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4882
"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4883
strings_processed(), strings_removed(),
4884
symbols_processed(), symbols_removed());
4885
}
4886
}
4887
4888
void work(uint worker_id) {
4889
if (_do_in_parallel) {
4890
int strings_processed = 0;
4891
int strings_removed = 0;
4892
int symbols_processed = 0;
4893
int symbols_removed = 0;
4894
if (_process_strings) {
4895
StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4896
Atomic::add(strings_processed, &_strings_processed);
4897
Atomic::add(strings_removed, &_strings_removed);
4898
}
4899
if (_process_symbols) {
4900
SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4901
Atomic::add(symbols_processed, &_symbols_processed);
4902
Atomic::add(symbols_removed, &_symbols_removed);
4903
}
4904
} else {
4905
if (_process_strings) {
4906
StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
4907
}
4908
if (_process_symbols) {
4909
SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
4910
}
4911
}
4912
}
4913
4914
size_t strings_processed() const { return (size_t)_strings_processed; }
4915
size_t strings_removed() const { return (size_t)_strings_removed; }
4916
4917
size_t symbols_processed() const { return (size_t)_symbols_processed; }
4918
size_t symbols_removed() const { return (size_t)_symbols_removed; }
4919
};
4920
4921
// Two-pass parallel cleaning/unloading of the code cache. Workers
// claim batches of nmethods lock-free via CAS on _claimed_nmethod;
// nmethods whose cleaning depends on other not-yet-cleaned nmethods
// are pushed onto a lock-free postponed list and handled in the
// second pass, after all workers rendezvous at a monitor barrier.
class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
private:
  static Monitor* _lock;

  BoolObjectClosure* const _is_alive;
  const bool               _unloading_occurred;
  const uint               _num_workers;

  // Variables used to claim nmethods.
  nmethod* _first_nmethod;
  volatile nmethod* _claimed_nmethod;

  // The list of nmethods that need to be processed by the second pass.
  volatile nmethod* _postponed_list;
  volatile uint     _num_entered_barrier;

 public:
  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
      _is_alive(is_alive),
      _unloading_occurred(unloading_occurred),
      _num_workers(num_workers),
      _first_nmethod(NULL),
      _claimed_nmethod(NULL),
      _postponed_list(NULL),
      _num_entered_barrier(0)
  {
    // Bump the global clock so "already cleaned this cycle" can be
    // distinguished per nmethod via its unloading clock.
    nmethod::increase_unloading_clock();
    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
  }

  ~G1CodeCacheUnloadingTask() {
    CodeCache::verify_clean_inline_caches();

    CodeCache::set_needs_cache_clean(false);
    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

    CodeCache::verify_icholder_relocations();
  }

 private:
  // CAS-push 'nm' onto the postponed list.
  void add_to_postponed_list(nmethod* nm) {
    nmethod* old;
    do {
      old = (nmethod*)_postponed_list;
      nm->set_unloading_next(old);
    } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
  }

  void clean_nmethod(nmethod* nm) {
    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

    if (postponed) {
      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
      add_to_postponed_list(nm);
    }

    // Mark that this nmethod has been cleaned/unloaded.
    // After this call, it will be safe to ask if this nmethod was unloaded or not.
    nm->set_unloading_clock(nmethod::global_unloading_clock());
  }

  void clean_nmethod_postponed(nmethod* nm) {
    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
  }

  static const int MaxClaimNmethods = 16;

  // Claim up to MaxClaimNmethods consecutive alive nmethods by CASing
  // the shared cursor from 'first' to 'last'.
  void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
    nmethod* first;
    nmethod* last;

    do {
      *num_claimed_nmethods = 0;

      first = last = (nmethod*)_claimed_nmethod;

      if (first != NULL) {
        for (int i = 0; i < MaxClaimNmethods; i++) {
          last = CodeCache::alive_nmethod(CodeCache::next(last));

          if (last == NULL) {
            break;
          }

          claimed_nmethods[i] = last;
          (*num_claimed_nmethods)++;
        }
      }

    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
  }

  // CAS-pop one nmethod off the postponed list; NULL when empty.
  nmethod* claim_postponed_nmethod() {
    nmethod* claim;
    nmethod* next;

    do {
      claim = (nmethod*)_postponed_list;
      if (claim == NULL) {
        return NULL;
      }

      next = claim->unloading_next();

    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);

    return claim;
  }

 public:
  // Mark that we're done with the first pass of nmethod cleaning.
  void barrier_mark(uint worker_id) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    _num_entered_barrier++;
    if (_num_entered_barrier == _num_workers) {
      ml.notify_all();
    }
  }

  // See if we have to wait for the other workers to
  // finish their first-pass nmethod cleaning work.
  void barrier_wait(uint worker_id) {
    if (_num_entered_barrier < _num_workers) {
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      while (_num_entered_barrier < _num_workers) {
        ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
      }
    }
  }

  // Cleaning and unloading of nmethods. Some work has to be postponed
  // to the second pass, when we know which nmethods survive.
  void work_first_pass(uint worker_id) {
    // The first nmethod is claimed by the first worker.
    if (worker_id == 0 && _first_nmethod != NULL) {
      clean_nmethod(_first_nmethod);
      _first_nmethod = NULL;
    }

    int num_claimed_nmethods;
    nmethod* claimed_nmethods[MaxClaimNmethods];

    while (true) {
      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);

      if (num_claimed_nmethods == 0) {
        break;
      }

      for (int i = 0; i < num_claimed_nmethods; i++) {
        clean_nmethod(claimed_nmethods[i]);
      }
    }

    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
  }

  void work_second_pass(uint worker_id) {
    nmethod* nm;
    // Take care of postponed nmethods.
    while ((nm = claim_postponed_nmethod()) != NULL) {
      clean_nmethod_postponed(nm);
    }
  }
};
// Monitor backing the first-pass rendezvous barrier above.
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
class G1KlassCleaningTask : public StackObj {
5093
BoolObjectClosure* _is_alive;
5094
volatile jint _clean_klass_tree_claimed;
5095
ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
5096
5097
public:
5098
G1KlassCleaningTask(BoolObjectClosure* is_alive) :
5099
_is_alive(is_alive),
5100
_clean_klass_tree_claimed(0),
5101
_klass_iterator() {
5102
}
5103
5104
private:
5105
bool claim_clean_klass_tree_task() {
5106
if (_clean_klass_tree_claimed) {
5107
return false;
5108
}
5109
5110
return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
5111
}
5112
5113
InstanceKlass* claim_next_klass() {
5114
Klass* klass;
5115
do {
5116
klass =_klass_iterator.next_klass();
5117
} while (klass != NULL && !klass->oop_is_instance());
5118
5119
return (InstanceKlass*)klass;
5120
}
5121
5122
public:
5123
5124
void clean_klass(InstanceKlass* ik) {
5125
ik->clean_weak_instanceklass_links(_is_alive);
5126
5127
if (JvmtiExport::has_redefined_a_class()) {
5128
InstanceKlass::purge_previous_versions(ik);
5129
}
5130
}
5131
5132
void work() {
5133
ResourceMark rm;
5134
5135
// One worker will clean the subklass/sibling klass tree.
5136
if (claim_clean_klass_tree_task()) {
5137
Klass::clean_subklass_tree(_is_alive);
5138
}
5139
5140
// All workers will help cleaning the classes,
5141
InstanceKlass* klass;
5142
while ((klass = claim_next_klass()) != NULL) {
5143
clean_klass(klass);
5144
}
5145
}
5146
};
5147
5148
// To minimize the remark pause times, the tasks below are done in parallel.
// Bundles string/symbol unlinking, code cache unloading and klass cleaning
// into a single gang task so their work phases can overlap across workers.
class G1ParallelCleaningTask : public AbstractGangTask {
private:
  G1StringSymbolTableUnlinkTask _string_symbol_task;  // unlinks dead StringTable/SymbolTable entries
  G1CodeCacheUnloadingTask      _code_cache_task;     // two-pass nmethod unloading (see work())
  G1KlassCleaningTask           _klass_cleaning_task; // cleans klasses that survived unloading

public:
  // The constructor is run in the VMThread.
  // is_alive decides object liveness; num_workers sizes the code cache
  // task's barrier; unloading_occurred tells it whether classes were unloaded.
  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
  AbstractGangTask("Parallel Cleaning"),
  _string_symbol_task(is_alive, process_strings, process_symbols),
  _code_cache_task(num_workers, is_alive, unloading_occurred),
  _klass_cleaning_task(is_alive) {
  }

  // Sanity check run by each worker before doing any cleaning work.
  void pre_work_verification() {
    // The VM Thread will have registered Metadata during the single-threaded phase of MetadataStackOnMark.
    assert(Thread::current()->is_VM_thread()
           || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
  }

  // Sanity check run by each worker after all cleaning work is done.
  void post_work_verification() {
    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
  }

  // The parallel work done by all worker threads.
  void work(uint worker_id) {
    pre_work_verification();

    // Do first pass of code cache cleaning.
    _code_cache_task.work_first_pass(worker_id);

    // Let the threads mark that the first pass is done.
    _code_cache_task.barrier_mark(worker_id);

    // Clean the Strings and Symbols.
    _string_symbol_task.work(worker_id);

    // Wait for all workers to finish the first code cache cleaning pass.
    _code_cache_task.barrier_wait(worker_id);

    // Do the second code cache cleaning work, which relies on
    // the liveness information gathered during the first pass.
    _code_cache_task.work_second_pass(worker_id);

    // Clean all klasses that were not unloaded.
    _klass_cleaning_task.work();

    post_work_verification();
  }
};
5200
5201
5202
void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5203
bool process_strings,
5204
bool process_symbols,
5205
bool class_unloading_occurred) {
5206
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5207
workers()->active_workers() : 1);
5208
5209
G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5210
n_workers, class_unloading_occurred);
5211
if (G1CollectedHeap::use_parallel_gc_threads()) {
5212
set_par_threads(n_workers);
5213
workers()->run_task(&g1_unlink_task);
5214
set_par_threads(0);
5215
} else {
5216
g1_unlink_task.work(0);
5217
}
5218
}
5219
5220
// Unlinks dead entries from the StringTable and/or SymbolTable (and the
// string-dedup queues when enabled), using the worker gang when parallel
// GC threads are available.
void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
                                                     bool process_strings, bool process_symbols) {
  {
    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                      _g1h->workers()->active_workers() : 1);
    G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      set_par_threads(n_workers);
      workers()->run_task(&g1_unlink_task);
      set_par_threads(0);
    } else {
      // Serial fallback: run the task body directly as worker 0.
      g1_unlink_task.work(0);
    }
  }

  // String deduplication keeps its own table/queues; unlink them too.
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::unlink(is_alive);
  }
}
5239
5240
// Gang task that re-dirties the card table entries recorded in a
// DirtyCardQueueSet, and reports per-worker counts to the phase times.
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
 private:
  DirtyCardQueueSet* _queue;  // the completed buffers to re-apply
 public:
  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }

  virtual void work(uint worker_id) {
    G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);

    RedirtyLoggedCardTableEntryClosure cl;
    // Parallel variant lets multiple workers claim buffers concurrently;
    // serial variant processes them all on this thread.
    if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
      _queue->par_apply_closure_to_all_completed_buffers(&cl);
    } else {
      _queue->apply_closure_to_all_completed_buffers(&cl);
    }

    // Record how many card entries this worker processed.
    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
  }
};
5260
5261
// Re-dirties the cards logged during the pause and merges the processed
// buffers back into the JavaThread dirty card queue set, timing the whole
// operation for the phase times.
void G1CollectedHeap::redirty_logged_cards() {
  double redirty_logged_cards_start = os::elapsedTime();

  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                    _g1h->workers()->active_workers() : 1);

  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
  // Prepare the set so workers can claim buffers in parallel.
  dirty_card_queue_set().reset_for_par_iteration();
  if (use_parallel_gc_threads()) {
    set_par_threads(n_workers);
    workers()->run_task(&redirty_task);
    set_par_threads(0);
  } else {
    redirty_task.work(0);
  }

  // Hand the (now processed) buffers back to the shared queue set so they
  // get refined through the normal path.
  DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  dcq.merge_bufferlists(&dirty_card_queue_set());
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");

  g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}
5283
5284
// Weak Reference Processing support
5285
5286
// An always "is_alive" closure that is used to preserve referents.
5287
// If the object is non-null then it's alive. Used in the preservation
5288
// of referent objects that are pointed to by reference objects
5289
// discovered by the CM ref processor.
5290
class G1AlwaysAliveClosure: public BoolObjectClosure {
5291
G1CollectedHeap* _g1;
5292
public:
5293
G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5294
bool do_object_b(oop p) {
5295
if (p != NULL) {
5296
return true;
5297
}
5298
return false;
5299
}
5300
};
5301
5302
// Liveness predicate used by the STW reference processor during an
// evacuation pause.
bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set,
  // or is inside and copied (a forwarding pointer was installed).
  // Note: the cset check must come first so is_forwarded() is only
  // consulted for collection-set objects.
  return !_g1->obj_in_cs(p) || p->is_forwarded();
}
5307
5308
// Non Copying Keep Alive closure
// Fixes up an oop slot after evacuation: cset objects must already have
// been copied (the slot is redirected to the forwardee); humongous
// objects are merely marked live. It never copies anything itself.
class G1KeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  // Narrow-oop variant is not expected to be reached by any caller.
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != NULL, "the caller should have filtered out NULL values");

    const InCSetState cset_state = _g1->in_cset_state(obj);
    // Objects outside the cset (and not humongous) need no fix-up.
    if (!cset_state.is_in_cset_or_humongous()) {
      return;
    }
    if (cset_state.is_in_cset()) {
      // In-cset objects must have been evacuated by now; redirect the
      // slot to the object's new location.
      assert( obj->is_forwarded(), "invariant" );
      *p = obj->forwardee();
    } else {
      // Humongous objects are never copied; just record liveness so the
      // region is not reclaimed eagerly.
      assert(!obj->is_forwarded(), "invariant" );
      assert(cset_state.is_humongous(),
             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
      _g1->set_humongous_is_live(obj);
    }
  }
};
5333
5334
// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.

class G1CopyingKeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _copy_non_heap_obj_cl;    // used for slots outside the G1 reserved heap
  G1ParScanThreadState* _par_scan_state; // per-worker state; owns the scan queue

public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            OopClosure* non_heap_obj_cl,
                            G1ParScanThreadState* pss):
    _g1h(g1h),
    _copy_non_heap_obj_cl(non_heap_obj_cl),
    _par_scan_state(pss)
  {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);

    if (_g1h->is_in_cset_or_humongous(obj)) {
      // If the referent object has been forwarded (either copied
      // to a new location or to itself in the event of an
      // evacuation failure) then we need to update the reference
      // field and, if both reference and referent are in the G1
      // heap, update the RSet for the referent.
      //
      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // If the reference field is in the G1 heap then we can push
      // on the PSS queue. When the queue is drained (after each
      // phase of reference processing) the object and its followers
      // will be copied, the reference field set to point to the
      // new location, and the RSet updated. Otherwise we need to
      // use the non-heap or metadata closures directly to copy
      // the referent object and update the pointer, while avoiding
      // updating the RSet.

      if (_g1h->is_in_g1_reserved(p)) {
        _par_scan_state->push_on_queue(p);
      } else {
        assert(!Metaspace::contains((const void*)p),
               err_msg("Unexpectedly found a pointer from metadata: "
                       PTR_FORMAT, p));
        _copy_non_heap_obj_cl->do_oop(p);
      }
    }
  }
};
5389
5390
// Serial drain queue closure. Called as the 'complete_gc'
// closure for each discovered list in some of the
// reference processing phases.

class G1STWDrainQueueClosure: public VoidClosure {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state; // the single PSS whose queue is drained

  G1ParScanThreadState* par_scan_state() { return _par_scan_state; }

public:
  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
    _g1h(g1h),
    _par_scan_state(pss)
  { }

  // Drain the PSS queue completely, copying any objects pushed on it
  // (and their transitive followers) during reference processing.
  void do_void() {
    G1ParScanThreadState* const pss = par_scan_state();
    pss->trim_queue();
  }
};
5412
5413
// Parallel Reference Processing closures

// Implementation of AbstractRefProcTaskExecutor for parallel reference
// processing during G1 evacuation pauses.

class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*   _g1h;
  RefToScanQueueSet* _queues;         // per-worker scan queues (enable work stealing)
  FlexibleWorkGang*  _workers;        // gang that will run the proxy tasks
  int                _active_workers; // number of workers to activate per task

public:
  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
                           FlexibleWorkGang* workers,
                           RefToScanQueueSet *task_queues,
                           int n_workers) :
    _g1h(g1h),
    _queues(task_queues),
    _workers(workers),
    _active_workers(n_workers)
  {
    assert(n_workers > 0, "shouldn't call this otherwise");
  }

  // Executes the given task using concurrent marking worker threads.
  // NOTE(review): callers in this file pass the GC worker gang
  // (workers()), not the CM threads — confirm the comment above.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
5442
5443
// Gang task for possibly parallel reference processing

// Wraps a ReferenceProcessor ProcessTask so it can run on the worker
// gang; each worker builds its own scan state and closures before
// delegating to the wrapped task.
class G1STWRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&     _proc_task;     // the underlying reference-processing phase
  G1CollectedHeap* _g1h;
  RefToScanQueueSet *_task_queues; // per-worker queues for the drain closure
  ParallelTaskTerminator* _terminator;

public:
  G1STWRefProcTaskProxy(ProcessTask& proc_task,
                        G1CollectedHeap* g1h,
                        RefToScanQueueSet *task_queues,
                        ParallelTaskTerminator* terminator) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _g1h(g1h),
    _task_queues(task_queues),
    _terminator(terminator)
  {}

  virtual void work(uint worker_id) {
    // The reference processing task executed by a single worker.
    ResourceMark rm;
    HandleMark hm;

    G1STWIsAliveClosure is_alive(_g1h);

    G1ParScanThreadState pss(_g1h, worker_id, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);

    pss.set_evac_failure_closure(&evac_failure_cl);

    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);

    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
    }

    // Keep alive closure.
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);

    // Complete GC closure
    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);

    // Call the reference processing task's work routine.
    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);

    // Note we cannot assert that the refs array is empty here as not all
    // of the processing tasks (specifically phase2 - pp2_work) execute
    // the complete_gc closure (which ordinarily would drain the queue) so
    // the queue may not be empty.
  }
};
5502
5503
// Driver routine for parallel reference processing.
// Creates an instance of the ref processing gang
// task and has the worker threads execute it.
void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");

  // Terminator coordinates work-stealing termination across the queues.
  ParallelTaskTerminator terminator(_active_workers, _queues);
  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);

  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&proc_task_proxy);
  _g1h->set_par_threads(0);
}
5516
5517
// Gang task for parallel reference enqueueing.

// Thin adapter that lets a ReferenceProcessor EnqueueTask run on the
// worker gang; each worker simply forwards its id to the wrapped task.
class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;  // the underlying enqueue phase

public:
  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task)
  { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};
5533
5534
// Driver routine for parallel reference enqueueing.
// Creates an instance of the ref enqueueing gang
// task and has the worker threads execute it.

void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");

  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);

  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&enq_task_proxy);
  _g1h->set_par_threads(0);
}
5547
5548
// End of weak reference support closures

// Abstract task used to preserve (i.e. copy) any referent objects
// that are in the collection set and are pointed to by reference
// objects discovered by the CM ref processor.

class G1ParPreserveCMReferentsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet *_queues;          // per-worker queues used when draining
  ParallelTaskTerminator _terminator;  // termination protocol for the drain phase
  uint _n_workers;

public:
  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
    AbstractGangTask("ParPreserveCMReferents"),
    _g1h(g1h),
    _queues(task_queues),
    _terminator(workers, _queues),
    _n_workers(workers)
  { }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;

    G1ParScanThreadState pss(_g1h, worker_id, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);

    pss.set_evac_failure_closure(&evac_failure_cl);

    assert(pss.queue_is_empty(), "both queue and overflow should be empty");

    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);

    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
    }

    // Is alive closure
    G1AlwaysAliveClosure always_alive(_g1h);

    // Copying keep alive closure. Applied to referent objects that need
    // to be copied.
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);

    ReferenceProcessor* rp = _g1h->ref_processor_cm();

    // limit = total number of discovered lists across all ref subclasses;
    // each worker strides across them so the lists are partitioned.
    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
    uint stride = MIN2(MAX2(_n_workers, 1U), limit);

    // limit is set using max_num_q() - which was set using ParallelGCThreads.
    // So this must be true - but assert just in case someone decides to
    // change the worker ids.
    assert(0 <= worker_id && worker_id < limit, "sanity");
    assert(!rp->discovery_is_atomic(), "check this code");

    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
    for (uint idx = worker_id; idx < limit; idx += stride) {
      DiscoveredList& ref_list = rp->discovered_refs()[idx];

      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
      while (iter.has_next()) {
        // Since discovery is not atomic for the CM ref processor, we
        // can see some null referent objects.
        iter.load_ptrs(DEBUG_ONLY(true));
        oop ref = iter.obj();

        // This will filter nulls.
        if (iter.is_referent_alive()) {
          iter.make_referent_alive();
        }
        iter.move_to_next();
      }
    }

    // Drain the queue - which may cause stealing
    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
    drain_queue.do_void();
    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
    assert(pss.queue_is_empty(), "should be");
  }
};
5636
5637
// Weak Reference processing during an evacuation pause (part 1).
// First preserves referents of CM-discovered references that live in the
// collection set, then runs the STW reference processor (serially or via
// G1STWRefProcTaskExecutor). no_of_gc_workers is the active worker count.
void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
  double ref_proc_start = os::elapsedTime();

  ReferenceProcessor* rp = _ref_processor_stw;
  assert(rp->discovery_enabled(), "should have been enabled");

  // Any reference objects, in the collection set, that were 'discovered'
  // by the CM ref processor should have already been copied (either by
  // applying the external root copy closure to the discovered lists, or
  // by following an RSet entry).
  //
  // But some of the referents, that are in the collection set, that these
  // reference objects point to may not have been copied: the STW ref
  // processor would have seen that the reference object had already
  // been 'discovered' and would have skipped discovering the reference,
  // but would not have treated the reference object as a regular oop.
  // As a result the copy closure would not have been applied to the
  // referent object.
  //
  // We need to explicitly copy these referent objects - the references
  // will be processed at the end of remarking.
  //
  // We also need to do this copying before we process the reference
  // objects discovered by the STW ref processor in case one of these
  // referents points to another object which is also referenced by an
  // object discovered by the STW ref processor.

  assert(!G1CollectedHeap::use_parallel_gc_threads() ||
         no_of_gc_workers == workers()->active_workers(),
         "Need to reset active GC workers");

  set_par_threads(no_of_gc_workers);
  G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                 no_of_gc_workers,
                                                 _task_queues);

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    workers()->run_task(&keep_cm_referents);
  } else {
    keep_cm_referents.work(0);
  }

  set_par_threads(0);

  // Closure to test whether a referent is alive.
  G1STWIsAliveClosure is_alive(this);

  // Even when parallel reference processing is enabled, the processing
  // of JNI refs is serial and performed serially by the current thread
  // rather than by a worker. The following PSS will be used for processing
  // JNI refs.

  // Use only a single queue for this PSS.
  G1ParScanThreadState pss(this, 0, NULL);

  // We do not embed a reference processor in the copying/scanning
  // closures while we're actually processing the discovered
  // reference objects.
  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);

  pss.set_evac_failure_closure(&evac_failure_cl);

  assert(pss.queue_is_empty(), "pre-condition");

  G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);

  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);

  OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

  if (_g1h->g1_policy()->during_initial_mark_pause()) {
    // We also need to mark copied objects.
    copy_non_heap_cl = &copy_mark_non_heap_cl;
  }

  // Keep alive closure.
  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);

  // Serial Complete GC closure
  G1STWDrainQueueClosure drain_queue(this, &pss);

  // Setup the soft refs policy...
  rp->setup_policy(false);

  ReferenceProcessorStats stats;
  if (!rp->processing_is_mt()) {
    // Serial reference processing...
    stats = rp->process_discovered_references(&is_alive,
                                              &keep_alive,
                                              &drain_queue,
                                              NULL,
                                              _gc_timer_stw,
                                              _gc_tracer_stw->gc_id());
  } else {
    // Parallel reference processing
    assert(rp->num_q() == no_of_gc_workers, "sanity");
    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
    stats = rp->process_discovered_references(&is_alive,
                                              &keep_alive,
                                              &drain_queue,
                                              &par_task_executor,
                                              _gc_timer_stw,
                                              _gc_tracer_stw->gc_id());
  }

  _gc_tracer_stw->report_gc_reference_stats(stats);

  // We have completed copying any necessary live referent objects.
  assert(pss.queue_is_empty(), "both queue and overflow should be empty");

  double ref_proc_time = os::elapsedTime() - ref_proc_start;
  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}
5753
5754
// Weak Reference processing during an evacuation pause (part 2).
// Moves any references still on the STW processor's discovered lists onto
// the pending list, serially or via the parallel executor.
void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
  double ref_enq_start = os::elapsedTime();

  ReferenceProcessor* rp = _ref_processor_stw;
  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");

  // Now enqueue any remaining on the discovered lists on to
  // the pending list.
  if (!rp->processing_is_mt()) {
    // Serial reference processing...
    rp->enqueue_discovered_references();
  } else {
    // Parallel reference enqueueing

    assert(no_of_gc_workers == workers()->active_workers(),
           "Need to reset active workers");
    assert(rp->num_q() == no_of_gc_workers, "sanity");
    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
    rp->enqueue_discovered_references(&par_task_executor);
  }

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");

  // FIXME
  // CM's reference processing also cleans up the string and symbol tables.
  // Should we do that here also? We could, but it is a serial operation
  // and could significantly increase the pause time.

  double ref_enq_time = os::elapsedTime() - ref_enq_start;
  g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
}
5789
5790
// Evacuates the collection set: runs the parallel copying phase
// (G1ParTask), then reference processing, string dedup fixup, alloc
// region retirement, evac-failure cleanup and card redirtying.
// The ordering of the steps below is significant; see the inline comments.
void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
  _expand_heap_after_alloc_failure = true;
  _evacuation_failed = false;

  // Should G1EvacuationFailureALot be in effect for this GC?
  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)

  g1_rem_set()->prepare_for_oops_into_collection_set_do();

  // Disable the hot card cache.
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->reset_hot_cache_claimed_index();
  hot_card_cache->set_use_cache(false);

  const uint n_workers = workers()->active_workers();
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "If not dynamic should be using all the workers");
  set_par_threads(n_workers);

  init_for_evac_failure(NULL);

  rem_set()->prepare_for_younger_refs_iterate(true);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par_time_sec = os::elapsedTime();
  double end_par_time_sec;

  {
    G1RootProcessor root_processor(this);
    G1ParTask g1_par_task(this, _task_queues, &root_processor);
    // InitialMark needs claim bits to keep track of the marked-through CLDs.
    if (g1_policy()->during_initial_mark_pause()) {
      ClassLoaderDataGraph::clear_claimed_marks();
    }

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      // The individual threads will set their evac-failure closures.
      if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
      // These tasks use ShareHeap::_process_strong_tasks
      assert(UseDynamicNumberOfGCThreads ||
             workers()->active_workers() == workers()->total_workers(),
             "If not dynamic should be using all the workers");
      workers()->run_task(&g1_par_task);
    } else {
      g1_par_task.set_for_termination(n_workers);
      g1_par_task.work(0);
    }
    end_par_time_sec = os::elapsedTime();

    // Closing the inner scope will execute the destructor
    // for the G1RootProcessor object. We record the current
    // elapsed time before closing the scope so that time
    // taken for the destructor is NOT included in the
    // reported parallel time.
  }

  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();

  double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  phase_times->record_par_time(par_time_ms);

  double code_root_fixup_time_ms =
        (os::elapsedTime() - end_par_time_sec) * 1000.0;
  phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);

  set_par_threads(0);

  // Process any discovered reference objects - we have
  // to do this _before_ we retire the GC alloc regions
  // as we may have to copy some 'reachable' referent
  // objects (and their reachable sub-graphs) that were
  // not copied during the pause.
  process_discovered_references(n_workers);

  if (G1StringDedup::is_enabled()) {
    double fixup_start = os::elapsedTime();

    G1STWIsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
    G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);

    double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
    phase_times->record_string_dedup_fixup_time(fixup_time_ms);
  }

  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();

  // Reset and re-enable the hot card cache.
  // Note the counts for the cards in the regions in the
  // collection set are reset when the collection set is freed.
  hot_card_cache->reset_hot_cache();
  hot_card_cache->set_use_cache(true);

  purge_code_root_memory();

  if (g1_policy()->during_initial_mark_pause()) {
    // Reset the claim values set during marking the strong code roots
    reset_heap_region_claim_values();
  }

  finalize_for_evac_failure();

  if (evacuation_failed()) {
    remove_self_forwarding_pointers();

    // Reset the G1EvacuationFailureALot counters and flags
    // Note: the values are reset only when an actual
    // evacuation failure occurs.
    NOT_PRODUCT(reset_evacuation_should_fail();)
  }

  // Enqueue any remaining references remaining on the STW
  // reference processor's discovered lists. We need to do
  // this after the card table is cleaned (and verified) as
  // the act of enqueueing entries on to the pending list
  // will log these updates (and dirty their associated
  // cards). We need these updates logged to update any
  // RSets.
  enqueue_discovered_references(n_workers);

  redirty_logged_cards();
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
5915
5916
// Frees a single (non-free, non-empty, committed) region: clears its
// prev bitmap range when verifying, resets its hot-card counts if old,
// clears the region itself and adds it to free_list in address order.
// 'par' indicates a possibly-concurrent caller; 'locked' indicates the
// caller already holds the lock hr_clear() would otherwise take.
void G1CollectedHeap::free_region(HeapRegion* hr,
                                  FreeRegionList* free_list,
                                  bool par,
                                  bool locked) {
  assert(!hr->is_free(), "the region should not be free");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
  assert(free_list != NULL, "pre-condition");

  if (G1VerifyBitmaps) {
    MemRegion mr(hr->bottom(), hr->end());
    concurrent_mark()->clearRangePrevBitmap(mr);
  }

  // Clear the card counts for this region.
  // Note: we only need to do this if the region is not young
  // (since we don't refine cards in young regions).
  if (!hr->is_young()) {
    _cg1r->hot_card_cache()->reset_card_counts(hr);
  }
  hr->hr_clear(par, true /* clear_space */, locked /* locked */);
  free_list->add_ordered(hr);
}
5939
5940
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5941
FreeRegionList* free_list,
5942
bool par) {
5943
assert(hr->startsHumongous(), "this is only for starts humongous regions");
5944
assert(free_list != NULL, "pre-condition");
5945
5946
size_t hr_capacity = hr->capacity();
5947
// We need to read this before we make the region non-humongous,
5948
// otherwise the information will be gone.
5949
uint last_index = hr->last_hc_index();
5950
hr->clear_humongous();
5951
free_region(hr, free_list, par);
5952
5953
uint i = hr->hrm_index() + 1;
5954
while (i < last_index) {
5955
HeapRegion* curr_hr = region_at(i);
5956
assert(curr_hr->continuesHumongous(), "invariant");
5957
curr_hr->clear_humongous();
5958
free_region(curr_hr, free_list, par);
5959
i += 1;
5960
}
5961
}
5962
5963
void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5964
const HeapRegionSetCount& humongous_regions_removed) {
5965
if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5966
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5967
_old_set.bulk_remove(old_regions_removed);
5968
_humongous_set.bulk_remove(humongous_regions_removed);
5969
}
5970
5971
}
5972
5973
// Moves all regions from 'list' into the heap's master free list,
// taking FreeList_lock only when the list is non-empty.
void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
  assert(list != NULL, "list can't be null");
  if (list->is_empty()) {
    return;  // nothing to insert; skip the lock
  }
  MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  _hrm.insert_list_into_free_list(list);
}
5980
5981
// Decreases the heap's used-bytes accounting by 'bytes' (delegates to
// the allocator, which owns the counter).
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  _allocator->decrease_used(bytes);
}
5984
5985
class G1ParCleanupCTTask : public AbstractGangTask {
5986
G1SATBCardTableModRefBS* _ct_bs;
5987
G1CollectedHeap* _g1h;
5988
HeapRegion* volatile _su_head;
5989
public:
5990
G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
5991
G1CollectedHeap* g1h) :
5992
AbstractGangTask("G1 Par Cleanup CT Task"),
5993
_ct_bs(ct_bs), _g1h(g1h) { }
5994
5995
void work(uint worker_id) {
5996
HeapRegion* r;
5997
while (r = _g1h->pop_dirty_cards_region()) {
5998
clear_cards(r);
5999
}
6000
}
6001
6002
void clear_cards(HeapRegion* r) {
6003
// Cards of the survivors should have already been dirtied.
6004
if (!r->is_survivor()) {
6005
_ct_bs->clear(MemRegion(r->bottom(), r->end()));
6006
}
6007
}
6008
};
6009
6010
#ifndef PRODUCT
6011
// Debug-only region closure that checks card table state after cleanup:
// survivor regions must be dirty, all other regions must be clean.
class G1VerifyCardTableCleanup: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1SATBCardTableModRefBS* _ct_bs;
public:
  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
    : _g1h(g1h), _ct_bs(ct_bs) { }
  virtual bool doHeapRegion(HeapRegion* r) {
    if (r->is_survivor()) {
      _g1h->verify_dirty_region(r);
    } else {
      _g1h->verify_not_dirty_region(r);
    }
    // Returning false continues the iteration over all regions.
    return false;
  }
};
6026
6027
// Verification helper: asserts that no card covering this region is dirty.
void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
  // All of the region should be clean.
  g1_barrier_set()->verify_not_dirty_region(MemRegion(hr->bottom(), hr->end()));
}
6033
6034
// Verification helper: asserts the allocated prefix of the region has the
// expected card state (g1-young value for young regions, dirty otherwise).
void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
  // We cannot guarantee that [bottom(),end()] is dirty. Threads
  // dirty allocated blocks as they allocate them. The thread that
  // retires each region and replaces it with a new one will do a
  // maximal allocation to fill in [pre_dummy_top(),end()] but will
  // not dirty that area (one less thing to have to do while holding
  // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  // is dirty.
  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  if (hr->is_young()) {
    ct_bs->verify_g1_young_region(mr);
  } else {
    ct_bs->verify_dirty_region(mr);
  }
}
6050
6051
// Verification helper: checks the card state of every region on the
// young list starting at 'head' (see verify_dirty_region()).
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
  // The original declared an unused local G1SATBCardTableModRefBS*
  // (g1_barrier_set()); removed — verify_dirty_region() fetches it itself.
  for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
    verify_dirty_region(hr);
  }
}
6057
6058
// Verification helper: checks card state for the whole young list.
void G1CollectedHeap::verify_dirty_young_regions() {
  verify_dirty_young_list(_young_list->first_region());
}
6061
6062
// Check that the given mark bitmap has no bit set in [tams, end).
// Returns false (after printing diagnostics) if a stray mark is found
// above the top-at-mark-start boundary; true otherwise.
bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
                                               HeapWord* tams, HeapWord* end) {
  guarantee(tams <= end,
            err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, tams, end));
  // First marked word at or above tams; == end when there is none.
  HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
  if (result < end) {
    gclog_or_tty->cr();
    gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
                           bitmap_name, result);
    gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
                           bitmap_name, tams, end);
    return false;
  }
  return true;
}
6077
6078
bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
6079
CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
6080
CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
6081
6082
HeapWord* bottom = hr->bottom();
6083
HeapWord* ptams = hr->prev_top_at_mark_start();
6084
HeapWord* ntams = hr->next_top_at_mark_start();
6085
HeapWord* end = hr->end();
6086
6087
bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
6088
6089
bool res_n = true;
6090
// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
6091
// we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
6092
// if we happen to be in that state.
6093
if (mark_in_progress() || !_cmThread->in_progress()) {
6094
res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
6095
}
6096
if (!res_p || !res_n) {
6097
gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
6098
HR_FORMAT_PARAMS(hr));
6099
gclog_or_tty->print_cr("#### Caller: %s", caller);
6100
return false;
6101
}
6102
return true;
6103
}
6104
6105
void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
6106
if (!G1VerifyBitmaps) return;
6107
6108
guarantee(verify_bitmaps(caller, hr), "bitmap verification");
6109
}
6110
6111
class G1VerifyBitmapClosure : public HeapRegionClosure {
6112
private:
6113
const char* _caller;
6114
G1CollectedHeap* _g1h;
6115
bool _failures;
6116
6117
public:
6118
G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
6119
_caller(caller), _g1h(g1h), _failures(false) { }
6120
6121
bool failures() { return _failures; }
6122
6123
virtual bool doHeapRegion(HeapRegion* hr) {
6124
if (hr->continuesHumongous()) return false;
6125
6126
bool result = _g1h->verify_bitmaps(_caller, hr);
6127
if (!result) {
6128
_failures = true;
6129
}
6130
return false;
6131
}
6132
};
6133
6134
void G1CollectedHeap::check_bitmaps(const char* caller) {
6135
if (!G1VerifyBitmaps) return;
6136
6137
G1VerifyBitmapClosure cl(caller, this);
6138
heap_region_iterate(&cl);
6139
guarantee(!cl.failures(), "bitmap verification");
6140
}
6141
6142
// Checks that the _in_cset_fast_test table agrees with each region's
// actual state (in-collection-set, humongous, young, old). Aborts the
// heap iteration (returns true) on the first inconsistency found.
class G1CheckCSetFastTableClosure : public HeapRegionClosure {
 private:
  bool _failures;
 public:
  G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

  virtual bool doHeapRegion(HeapRegion* hr) {
    uint i = hr->hrm_index();
    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
    if (hr->isHumongous()) {
      // Humongous regions are never placed in the collection set proper.
      if (hr->in_collection_set()) {
        gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->continuesHumongous() && cset_state.is_humongous()) {
        gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
    } else {
      if (cset_state.is_humongous()) {
        gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != cset_state.is_in_cset()) {
        gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
                               hr->in_collection_set(), cset_state.value(), i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        // For regions in the cset, the young/old attribute must match too.
        if (hr->is_young() != (cset_state.is_young())) {
          gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
                                 hr->is_young(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (cset_state.is_old())) {
          gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
                                 hr->is_old(), cset_state.value(), i);
          _failures = true;
          return true;
        }
      }
    }
    return false;
  }

  bool failures() const { return _failures; }
};
6199
6200
// Full-heap consistency check of the in-cset fast test table against the
// regions' actual state. Returns true iff no inconsistency was found.
bool G1CollectedHeap::check_cset_fast_test() {
  G1CheckCSetFastTableClosure cl;
  _hrm.iterate(&cl);
  return !cl.failures();
}
6205
#endif // PRODUCT
6206
6207
// Clears the card table for every region on the dirty-cards region list,
// in parallel when GC worker threads are in use. Elapsed time is recorded
// in the policy's phase times.
void G1CollectedHeap::cleanUpCardTable() {
  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  double start = os::elapsedTime();

  {
    // Iterate over the dirty cards region list.
    G1ParCleanupCTTask cleanup_task(ct_bs, this);

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      set_par_threads();
      workers()->run_task(&cleanup_task);
      set_par_threads(0);
    } else {
      // Serial fallback: walk and unlink the dirty-cards region list
      // ourselves, clearing each region's cards as we go.
      while (_dirty_cards_region_list) {
        HeapRegion* r = _dirty_cards_region_list;
        cleanup_task.clear_cards(r);
        _dirty_cards_region_list = r->get_next_dirty_cards_region();
        if (_dirty_cards_region_list == r) {
          // The last region.
          _dirty_cards_region_list = NULL;
        }
        r->set_next_dirty_cards_region(NULL);
      }
    }
#ifndef PRODUCT
    if (G1VerifyCTCleanup || VerifyAfterGC) {
      G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
      heap_region_iterate(&cleanup_verifier);
    }
#endif
  }

  double elapsed = os::elapsedTime() - start;
  g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
}
6242
6243
// Walks the collection set list headed by cs_head after evacuation,
// unlinking every region from the set. Successfully evacuated regions are
// freed onto a local free list (prepended to the master free list at the
// end); regions that failed evacuation are retained and moved to the old
// set. Young and non-young processing times, freed byte counts, and
// remembered set lengths are recorded in the policy and evacuation_info.
//
// Fixes: removed the local `int age_bound = -1;` which was never read,
// and corrected two comment typos.
void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
  size_t pre_used = 0;
  FreeRegionList local_free_list("Local List for CSet Freeing");

  double young_time_ms     = 0.0;
  double non_young_time_ms = 0.0;

  // Since the collection set is a superset of the young list,
  // all we need to do to clear the young list is clear its
  // head and length, and unlink any young regions in the code below
  _young_list->clear();

  G1CollectorPolicy* policy = g1_policy();

  double start_sec = os::elapsedTime();
  bool non_young = true;

  HeapRegion* cur = cs_head;
  size_t rs_lengths = 0;

  while (cur != NULL) {
    assert(!is_on_master_free_list(cur), "sanity");
    // Time young and non-young regions separately; flip the accumulator
    // whenever the region kind changes along the list.
    if (non_young) {
      if (cur->is_young()) {
        double end_sec = os::elapsedTime();
        double elapsed_ms = (end_sec - start_sec) * 1000.0;
        non_young_time_ms += elapsed_ms;

        start_sec = os::elapsedTime();
        non_young = false;
      }
    } else {
      if (!cur->is_young()) {
        double end_sec = os::elapsedTime();
        double elapsed_ms = (end_sec - start_sec) * 1000.0;
        young_time_ms += elapsed_ms;

        start_sec = os::elapsedTime();
        non_young = true;
      }
    }

    rs_lengths += cur->rem_set()->occupied_locked();

    HeapRegion* next = cur->next_in_collection_set();
    assert(cur->in_collection_set(), "bad CS");
    cur->set_next_in_collection_set(NULL);
    cur->set_in_collection_set(false);

    if (cur->is_young()) {
      int index = cur->young_index_in_cset();
      assert(index != -1, "invariant");
      assert((uint) index < policy->young_cset_region_length(), "invariant");
      size_t words_survived = _surviving_young_words[index];
      cur->record_surv_words_in_group(words_survived);

      // At this point we have 'popped' cur from the collection set
      // (linked via next_in_collection_set()) but it is still in the
      // young list (linked via next_young_region()). Clear the
      // _next_young_region field.
      cur->set_next_young_region(NULL);
    } else {
      int index = cur->young_index_in_cset();
      assert(index == -1, "invariant");
    }

    assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
            (!cur->is_young() && cur->young_index_in_cset() == -1),
            "invariant" );

    if (!cur->evacuation_failed()) {
      MemRegion used_mr = cur->used_region();

      // And the region is empty.
      assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
      pre_used += cur->used();
      free_region(cur, &local_free_list, false /* par */, true /* locked */);
    } else {
      // Evacuation failed: keep the region's contents and retire it
      // into the old set.
      cur->uninstall_surv_rate_group();
      if (cur->is_young()) {
        cur->set_young_index_in_cset(-1);
      }
      cur->set_evacuation_failed(false);
      // The region is now considered to be old.
      cur->set_old();
      _old_set.add(cur);
      evacuation_info.increment_collectionset_used_after(cur->used());
    }
    cur = next;
  }

  evacuation_info.set_regions_freed(local_free_list.length());
  policy->record_max_rs_lengths(rs_lengths);
  policy->cset_regions_freed();

  double end_sec = os::elapsedTime();
  double elapsed_ms = (end_sec - start_sec) * 1000.0;

  if (non_young) {
    non_young_time_ms += elapsed_ms;
  } else {
    young_time_ms += elapsed_ms;
  }

  prepend_to_freelist(&local_free_list);
  decrement_summary_bytes(pre_used);
  policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
  policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}
6353
6354
// Closure used to eagerly reclaim dead humongous objects during an
// evacuation pause. A humongous object is reclaimed when it is a reclaim
// candidate and its remembered set is empty (see the rationale inside
// doHeapRegion()). Freed regions are collected on the supplied free-region
// list; counts and byte totals are accumulated for the caller.
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
 private:
  FreeRegionList* _free_region_list;
  // NOTE(review): _proxy_set appears unused within this closure -- confirm.
  HeapRegionSet* _proxy_set;
  HeapRegionSetCount _humongous_regions_removed;
  size_t _freed_bytes;
 public:

  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
          _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // Only the "starts humongous" region carries the object header;
    // continuation regions are freed together with it.
    if (!r->startsHumongous()) {
      return false;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    oop obj = (oop)r->bottom();
    CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();

    // The following checks whether the humongous object is live are sufficient.
    // The main additional check (in addition to having a reference from the roots
    // or the young gen) is whether the humongous object has a remembered set entry.
    //
    // A humongous object cannot be live if there is no remembered set for it
    // because:
    // - there can be no references from within humongous starts regions referencing
    // the object because we never allocate other objects into them.
    // (I.e. there are no intra-region references that may be missed by the
    // remembered set)
    // - as soon there is a remembered set entry to the humongous starts region
    // (i.e. it has "escaped" to an old object) this remembered set entry will stay
    // until the end of a concurrent mark.
    //
    // It is not required to check whether the object has been found dead by marking
    // or not, in fact it would prevent reclamation within a concurrent cycle, as
    // all objects allocated during that time are considered live.
    // SATB marking is even more conservative than the remembered set.
    // So if at this point in the collection there is no remembered set entry,
    // nobody has a reference to it.
    // At the start of collection we flush all refinement logs, and remembered sets
    // are completely up-to-date wrt to references to the humongous object.
    //
    // Other implementation considerations:
    // - never consider object arrays at this time because they would pose
    // considerable effort for cleaning up the remembered sets. This is
    // required because stale remembered sets might reference locations that
    // are currently allocated into.
    uint region_idx = r->hrm_index();
    if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
        !r->rem_set()->is_empty()) {

      if (G1TraceEagerReclaimHumongousObjects) {
        gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length " UINT32_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                               region_idx,
                               obj->size()*HeapWordSize,
                               r->bottom(),
                               r->region_num(),
                               r->rem_set()->occupied(),
                               r->rem_set()->strong_code_roots_list_length(),
                               next_bitmap->isMarked(r->bottom()),
                               g1h->is_humongous_reclaim_candidate(region_idx),
                               obj->is_typeArray()
                              );
      }

      return false;
    }

    guarantee(obj->is_typeArray(),
              err_msg("Only eagerly reclaiming type arrays is supported, but the object "
                      PTR_FORMAT " is not.",
                      r->bottom()));

    if (G1TraceEagerReclaimHumongousObjects) {
      gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length " UINT32_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                             region_idx,
                             obj->size()*HeapWordSize,
                             r->bottom(),
                             r->region_num(),
                             r->rem_set()->occupied(),
                             r->rem_set()->strong_code_roots_list_length(),
                             next_bitmap->isMarked(r->bottom()),
                             g1h->is_humongous_reclaim_candidate(region_idx),
                             obj->is_typeArray()
                            );
    }
    // Need to clear mark bit of the humongous object if already set.
    if (next_bitmap->isMarked(r->bottom())) {
      next_bitmap->clear(r->bottom());
    }
    _freed_bytes += r->used();
    r->set_containing_set(NULL);
    _humongous_regions_removed.increment(1u, r->capacity());
    g1h->free_humongous_region(r, _free_region_list, false);

    return false;
  }

  // Count (and capacity) of the humongous regions removed so far.
  HeapRegionSetCount& humongous_free_count() {
    return _humongous_regions_removed;
  }

  // Total bytes reclaimed so far.
  size_t bytes_freed() const {
    return _freed_bytes;
  }

  // Number of humongous regions reclaimed so far.
  size_t humongous_reclaimed() const {
    return _humongous_regions_removed.length();
  }
};
6467
6468
// Attempt to reclaim all dead humongous regions at the end of an
// evacuation pause. Bails out early (recording a zero time and count)
// when eager reclaim is disabled, or when there are no candidates and
// tracing is off.
void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
  assert_at_safepoint(true);

  if (!G1EagerReclaimHumongousObjects ||
      (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
    return;
  }

  double start_time = os::elapsedTime();

  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");

  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
  heap_region_iterate(&cl);

  // Only humongous regions were removed, so pass an empty set for the
  // old-region side of the accounting.
  HeapRegionSetCount empty_set;
  remove_from_old_sets(empty_set, cl.humongous_free_count());

  G1HRPrinter* hr_printer = _g1h->hr_printer();
  if (hr_printer->is_active()) {
    FreeRegionListIterator iter(&local_cleanup_list);
    while (iter.more_available()) {
      HeapRegion* hr = iter.get_next();
      hr_printer->cleanup(hr);
    }
  }

  prepend_to_freelist(&local_cleanup_list);
  decrement_summary_bytes(cl.bytes_freed());

  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
                                                                    cl.humongous_reclaimed());
}
6502
6503
// This routine is similar to the above but does not record
6504
// any policy statistics or update free lists; we are abandoning
6505
// the current incremental collection set in preparation of a
6506
// full collection. After the full GC we will start to build up
6507
// the incremental collection set again.
6508
// This is only called when we're doing a full collection
6509
// and is immediately followed by the tearing down of the young list.
6510
6511
// Unlink every region from the (incremental) collection set without any
// policy bookkeeping or free-list updates; used when abandoning the cset
// ahead of a full collection (see the comment block above).
void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  for (HeapRegion* cur = cs_head; cur != NULL; ) {
    assert(cur->in_collection_set(), "bad CS");
    HeapRegion* next = cur->next_in_collection_set();
    cur->set_next_in_collection_set(NULL);
    cur->set_in_collection_set(false);
    cur->set_young_index_in_cset(-1);
    cur = next;
  }
}
6523
6524
// Flag that the concurrent cleanup phase is about to start handing freed
// regions to the secondary free list (see wait_while_free_regions_coming()).
void G1CollectedHeap::set_free_regions_coming() {
  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
                           "setting free regions coming");
  }

  assert(!free_regions_coming(), "pre-condition");
  _free_regions_coming = true;
}
6533
6534
// Clear the free-regions-coming flag and wake up any threads blocked in
// wait_while_free_regions_coming(). The flag is cleared and the waiters
// notified under SecondaryFreeList_lock.
void G1CollectedHeap::reset_free_regions_coming() {
  assert(free_regions_coming(), "pre-condition");

  {
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _free_regions_coming = false;
    SecondaryFreeList_lock->notify_all();
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
                           "reset free regions coming");
  }
}
6548
6549
// Block the calling thread until the concurrent cleanup phase has finished
// handing freed regions to the secondary free list (i.e. until
// reset_free_regions_coming() is called).
void G1CollectedHeap::wait_while_free_regions_coming() {
  // Most of the time we won't have to wait, so let's do a quick test
  // first before we take the lock.
  if (!free_regions_coming()) {
    return;
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
                           "waiting for free regions");
  }

  {
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    // Re-check the flag under the lock; spurious wakeups re-enter the loop.
    while (free_regions_coming()) {
      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
                           "done waiting for free regions");
  }
}
6573
6574
// Push hr onto the young list. Caller must already hold the heap lock
// (or be the VM thread at a safepoint), as asserted below.
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
}
6579
6580
// Reports (and records failure for) any region in the heap that is still
// tagged young. Used by check_young_list_empty() below.
class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_young()) {
      gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                             r->bottom(), r->end());
      _success = false;
    }
    // Keep scanning so every offending region is reported.
    return false;
  }
  bool success() { return _success; }
};
6595
6596
bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6597
bool ret = _young_list->check_list_empty(check_sample);
6598
6599
if (check_heap) {
6600
NoYoungRegionsClosure closure;
6601
heap_region_iterate(&closure);
6602
ret = ret && closure.success();
6603
}
6604
6605
return ret;
6606
}
6607
6608
// Removes every old region from the given old set; all other region kinds
// are deliberately left alone (see comments below). The destructor checks
// that the set ended up empty.
class TearDownRegionSetsClosure : public HeapRegionClosure {
private:
  HeapRegionSet *_old_set;

public:
  TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }

  bool doHeapRegion(HeapRegion* r) {
    if (r->is_old()) {
      _old_set->remove(r);
    } else {
      // We ignore free regions, we'll empty the free list afterwards.
      // We ignore young regions, we'll empty the young list afterwards.
      // We ignore humongous regions, we're not tearing down the
      // humongous regions set.
      assert(r->is_free() || r->is_young() || r->isHumongous(),
             "it cannot be another type");
    }
    return false;
  }

  ~TearDownRegionSetsClosure() {
    assert(_old_set->is_empty(), "post-condition");
  }
};
6633
6634
// Dismantles the region sets ahead of a full GC. When free_list_only is
// true only the master free list is emptied; otherwise the old set is
// torn down as well (the young list is handled later, see below).
void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  if (!free_list_only) {
    TearDownRegionSetsClosure cl(&_old_set);
    heap_region_iterate(&cl);

    // Note that emptying the _young_list is postponed and instead done as
    // the first step when rebuilding the regions sets again. The reason for
    // this is that during a full GC string deduplication needs to know if
    // a collected region was young or old when the full GC was initiated.
  }
  _hrm.remove_all_free_regions();
}
6648
6649
// Rebuilds the free list (and, unless free_list_only, the old set) after
// a full GC by classifying every region from its current contents, and
// accumulates the total used bytes of the non-free regions it visits.
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
  bool _free_list_only;
  HeapRegionSet* _old_set;
  HeapRegionManager* _hrm;
  size_t _total_used;

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
    _free_list_only(free_list_only),
    _old_set(old_set), _hrm(hrm), _total_used(0) {
    // tear_down_region_sets() must have run first.
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
    }
  }

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) {
      return false;
    }

    if (r->is_empty()) {
      // Add free regions to the free list
      r->set_free();
      r->set_allocation_context(AllocationContext::system());
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(!r->is_young(), "we should not come across young regions");

      if (r->isHumongous()) {
        // We ignore humongous regions, we left the humongous set unchanged
      } else {
        // Objects that were compacted would have ended up on regions
        // that were previously old or free.
        assert(r->is_free() || r->is_old(), "invariant");
        // We now consider them old, so register as such.
        r->set_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }

    return false;
  }

  // Sum of used() over all non-free regions visited so far.
  size_t total_used() {
    return _total_used;
  }
};
6700
6701
// Rebuilds the region sets after a full GC (counterpart of
// tear_down_region_sets()). When free_list_only is false, the young list
// is emptied first and the allocator's used-bytes figure is refreshed
// from the rebuilt sets.
void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  if (!free_list_only) {
    _young_list->empty_list();
  }

  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
    _allocator->set_used(cl.total_used());
  }
  assert(_allocator->used_unlocked() == recalculate_used(),
         err_msg("inconsistent _allocator->used_unlocked(), "
                 "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
                 _allocator->used_unlocked(), recalculate_used()));
}
6719
6720
// Propagate the concurrency setting to the refinement card-table-entry
// closure.
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}
6723
6724
bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6725
HeapRegion* hr = heap_region_containing(p);
6726
return hr->is_in(p);
6727
}
6728
6729
// Methods for the mutator alloc region
6730
6731
// Allocate a new eden region for mutator allocation, or return NULL when
// the young list is full and force is false. force may only be true when
// the policy says the young list can still be expanded.
HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                      bool force) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(!force || g1_policy()->can_expand_young_list(),
         "if force is true we should be able to expand the young list");
  bool young_list_full = g1_policy()->is_young_list_full();
  if (force || !young_list_full) {
    HeapRegion* new_alloc_region = new_region(word_size,
                                              false /* is_old */,
                                              false /* do_expand */);
    if (new_alloc_region != NULL) {
      set_region_short_lived_locked(new_alloc_region);
      _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
      check_bitmaps("Mutator Region Allocation", new_alloc_region);
      return new_alloc_region;
    }
  }
  return NULL;
}
6750
6751
// Retire the current mutator (eden) region: add it to the incremental
// collection set and account the bytes actually allocated in it.
void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                  size_t allocated_bytes) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

  g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  _allocator->increase_used(allocated_bytes);
  _hr_printer.retire(alloc_region);
  // We update the eden sizes here, when the region is retired,
  // instead of when it's allocated, since this is the point that its
  // used space has been recorded in _summary_bytes_used.
  g1mm()->update_eden_size();
}
6764
6765
// Set the number of parallel GC threads to the work gang's current active
// worker count. A zero count should have been set by a prior evacuation
// pause; it is repaired (with an assert) using ParallelGCThreads.
void G1CollectedHeap::set_par_threads() {
  // Don't change the number of workers. Use the value previously set
  // in the workgroup.
  assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
  uint n_workers = workers()->active_workers();
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "Otherwise should be using the total number of workers");
  if (n_workers == 0) {
    assert(false, "Should have been set in prior evacuation pause.");
    n_workers = ParallelGCThreads;
    workers()->set_active_workers(n_workers);
  }
  set_par_threads(n_workers);
}
6780
6781
// Methods for the GC alloc regions
6782
6783
// Allocate a new GC alloc region (survivor when dest is young, old
// otherwise), unless `count` has reached the policy's per-destination
// region budget. Caller must hold FreeList_lock.
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                 uint count,
                                                 InCSetState dest) {
  assert(FreeList_lock->owned_by_self(), "pre-condition");

  if (count < g1_policy()->max_regions(dest)) {
    const bool is_survivor = (dest.is_young());
    HeapRegion* new_alloc_region = new_region(word_size,
                                              !is_survivor,
                                              true /* do_expand */);
    if (new_alloc_region != NULL) {
      // We really only need to do this for old regions given that we
      // should never scan survivors. But it doesn't hurt to do it
      // for survivors too.
      new_alloc_region->record_timestamp();
      if (is_survivor) {
        new_alloc_region->set_survivor();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
        check_bitmaps("Survivor Region Allocation", new_alloc_region);
      } else {
        new_alloc_region->set_old();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
    }
  }
  return NULL;
}
6814
6815
// Retire a GC alloc region: record the bytes copied into it with the
// policy and file the region on the survivor young list or the old set,
// depending on its destination.
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             InCSetState dest) {
  bool during_im = g1_policy()->during_initial_mark_pause();
  alloc_region->note_end_of_copying(during_im);
  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  if (dest.is_young()) {
    young_list()->add_survivor_region(alloc_region);
  } else {
    _old_set.add(alloc_region);
  }
  _hr_printer.retire(alloc_region);
}
6828
6829
// Heap region set verification
6830
6831
// Recounts the old, humongous and free regions by walking the heap,
// checking each region's containing set along the way; verify_counts()
// then compares the tallies against the sets' own bookkeeping.
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;
  HeapRegionSet* _humongous_set;
  HeapRegionManager* _hrm;

public:
  HeapRegionSetCount _old_count;
  HeapRegionSetCount _humongous_count;
  HeapRegionSetCount _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count(){ }

  bool doHeapRegion(HeapRegion* hr) {
    // Only the "starts humongous" region is counted for a humongous object.
    if (hr->continuesHumongous()) {
      return false;
    }

    if (hr->is_young()) {
      // TODO
    } else if (hr->startsHumongous()) {
      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
      _humongous_count.increment(1u, hr->capacity());
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
      _free_count.increment(1u, hr->capacity());
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
      _old_count.increment(1u, hr->capacity());
    } else {
      ShouldNotReachHere();
    }
    return false;
  }

  // Compare the recounted lengths/capacities against the sets' own numbers.
  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
    guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
                                                                                old_set->total_capacity_bytes(), _old_count.capacity()));

    guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
    guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
                                                                                            humongous_set->total_capacity_bytes(), _humongous_count.capacity()));

    guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
    guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
                                                                                   free_list->total_capacity_bytes(), _free_count.capacity()));
  }
};
6884
6885
// Verify that the region lists/sets (master free list, secondary free
// list, old set, humongous set) are internally consistent and agree with
// the regions actually present in the heap.
void G1CollectedHeap::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _hrm.verify();
  {
    // Given that a concurrent operation might be adding regions to
    // the secondary free list we have to take the lock before
    // verifying it.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _secondary_free_list.verify_list();
  }

  // If a concurrent region freeing operation is in progress it will
  // be difficult to correctly attribute any free regions we come
  // across to the correct free list given that they might belong to
  // one of several (free_list, secondary_free_list, any local lists,
  // etc.). So, if that's the case we will skip the rest of the
  // verification operation. Alternatively, waiting for the concurrent
  // operation to complete will have a non-trivial effect on the GC's
  // operation (no concurrent operation will last longer than the
  // interval between two calls to verification) and it might hide
  // any issues that we would like to catch during testing.
  if (free_regions_coming()) {
    return;
  }

  // Make sure we append the secondary_free_list on the free_list so
  // that all free regions we will come across can be safely
  // attributed to the free_list.
  append_secondary_free_list_if_not_empty_with_lock();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
  heap_region_iterate(&cl);
  cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
}
6924
6925
// Optimized nmethod scanning
6926
6927
// Oop closure that registers the nmethod with the strong code root list
// of each region containing one of the nmethod's embedded oops.
class RegisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      assert(!hr->continuesHumongous(),
             err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
                     " starting at " HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
      hr->add_strong_code_root_locked(_nm);
    }
  }

public:
  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
6953
6954
class UnregisterNMethodOopClosure: public OopClosure {
6955
G1CollectedHeap* _g1h;
6956
nmethod* _nm;
6957
6958
template <class T> void do_oop_work(T* p) {
6959
T heap_oop = oopDesc::load_heap_oop(p);
6960
if (!oopDesc::is_null(heap_oop)) {
6961
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6962
HeapRegion* hr = _g1h->heap_region_containing(obj);
6963
assert(!hr->continuesHumongous(),
6964
err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6965
" starting at " HR_FORMAT,
6966
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6967
6968
hr->remove_strong_code_root(_nm);
6969
}
6970
}
6971
6972
public:
6973
UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6974
_g1h(g1h), _nm(nm) {}
6975
6976
void do_oop(oop* p) { do_oop_work(p); }
6977
void do_oop(narrowOop* p) { do_oop_work(p); }
6978
};
6979
6980
// Record nm as a strong code root in every heap region that contains one
// of its embedded oops.
void G1CollectedHeap::register_nmethod(nmethod* nm) {
  // Shared CollectedHeap bookkeeping first.
  CollectedHeap::register_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  RegisterNMethodOopClosure add_cl(this, nm);
  nm->oops_do(&add_cl);
}
6987
6988
// Remove nm from the strong code root set of every heap region that
// contains one of its embedded oops.
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  // Shared CollectedHeap bookkeeping first.
  CollectedHeap::unregister_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  UnregisterNMethodOopClosure remove_cl(this, nm);
  // NOTE(review): the boolean arg selects an oops_do overload — it appears
  // to permit visiting non-entrant/zombie nmethods; confirm against
  // nmethod::oops_do.
  nm->oops_do(&remove_cl, true);
}
6995
6996
void G1CollectedHeap::purge_code_root_memory() {
6997
double purge_start = os::elapsedTime();
6998
G1CodeRootSet::purge();
6999
double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
7000
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
7001
}
7002
7003
class RebuildStrongCodeRootClosure: public CodeBlobClosure {
7004
G1CollectedHeap* _g1h;
7005
7006
public:
7007
RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
7008
_g1h(g1h) {}
7009
7010
void do_code_blob(CodeBlob* cb) {
7011
nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
7012
if (nm == NULL) {
7013
return;
7014
}
7015
7016
if (ScavengeRootsInCode) {
7017
_g1h->register_nmethod(nm);
7018
}
7019
}
7020
};
7021
7022
void G1CollectedHeap::rebuild_strong_code_roots() {
7023
RebuildStrongCodeRootClosure blob_cl(this);
7024
CodeCache::blobs_do(&blob_cl);
7025
}
7026
7027