GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
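
// Illustrative usage sketch (assumes a CMS-thread phase and some lock
// accessor such as bitMapLock(); see the phase code later in this file for
// the real call sites):
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     // ... work that must not be interleaved with the VM thread ...
//   }  // locks released in reverse order, then the CMS token is released
//
// The VM thread uses the same pattern with 'false' for is_cms_thread.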


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms. Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle. Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
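
// Worked example (assuming the usual JDK 8 defaults: io == -1 because
// CMSInitiatingOccupancyFraction is not set, MinHeapFreeRatio == 40 and
// CMSTriggerRatio == 80):
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
// i.e. a concurrent cycle is considered once the CMS (old) generation is
// about 92% occupied.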

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
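
// Worked example (illustrative numbers, not measurements): with 100M of
// cms_free, an expected (padded) promotion of 20M and the default
// CMSIncrementalSafetyFactor of 10, the adjusted free space is
// (100M - 20M) * 0.9 = 72M; at a cms_consumption_rate() of 5M/s the value
// returned is 72 / (5 + 1) = 12 seconds until the generation fills up.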

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note: use subtraction with caution since it may underflow (values are
  // unsigned). Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
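
// Worked example: for old_duty_cycle == 60 and new_duty_cycle == 10 the
// largest downward step is MAX2(60 / 4, 5U) == 15, so the damped result is
// 60 - 15 == 45 rather than dropping straight to 10.  An increase from
// 60 to 90 is likewise clamped to 60 + 15 == 75.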

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS* ct,
                           ConcurrentMarkSweepPolicy* cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

size_t CMSCollector::plab_sample_minimum_size() {
  // The default value of MinTLABSize is 2k, but there is
  // no way to get the default value if the flag has been overridden.
  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print("[%d %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%d %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::used_stable() const {
  return cmsSpace()->used_stable();
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      "max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size " SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                                         CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        p2i(_icms_start_limit), p2i(_icms_stop_limit),
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               p2i(top), p2i(_icms_stop_limit),
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               p2i(top), p2i(space->end()),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
           CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                                        HeapWord* top,
                                                        size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//   ^                                                                  |
//   |------------------------ DEAD <------------------------------------
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object. Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary. We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}
1451
1452
void
1453
ConcurrentMarkSweepGeneration::
1454
par_promote_alloc_undo(int thread_num,
1455
HeapWord* obj, size_t word_sz) {
1456
// CMS does not support promotion undo.
1457
ShouldNotReachHere();
1458
}
1459
1460
void
1461
ConcurrentMarkSweepGeneration::
1462
par_promote_alloc_done(int thread_num) {
1463
CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1464
ps->lab.retire(thread_num);
1465
}
1466
1467
void
1468
ConcurrentMarkSweepGeneration::
1469
par_oop_since_save_marks_iterate_done(int thread_num) {
1470
CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1471
ParScanWithoutBarrierClosure* dummy_cl = NULL;
1472
ps->promo.promoted_oops_iterate_nv(dummy_cl);
1473
}
1474
1475
bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1476
size_t size,
1477
bool tlab)
1478
{
1479
// We allow a STW collection only if a full
1480
// collection was requested.
1481
return full || should_allocate(size, tlab); // FIX ME !!!
1482
// This and promotion failure handling are connected at the
1483
// hip and should be fixed by untying them.
1484
}
1485
1486
bool CMSCollector::shouldConcurrentCollect() {
1487
if (_full_gc_requested) {
1488
if (Verbose && PrintGCDetails) {
1489
gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1490
" gc request (or gc_locker)");
1491
}
1492
return true;
1493
}
1494
1495
// For debugging purposes, change the type of collection.
1496
// If the rotation is not on the concurrent collection
1497
// type, don't start a concurrent collection.
1498
NOT_PRODUCT(
1499
if (RotateCMSCollectionTypes &&
1500
(_cmsGen->debug_collection_type() !=
1501
ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1502
assert(_cmsGen->debug_collection_type() !=
1503
ConcurrentMarkSweepGeneration::Unknown_collection_type,
1504
"Bad cms collection type");
1505
return false;
1506
}
1507
)
1508
1509
FreelistLocker x(this);
1510
// ------------------------------------------------------------------
1511
// Print out lots of information which affects the initiation of
1512
// a collection.
1513
if (PrintCMSInitiationStatistics && stats().valid()) {
1514
gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1515
gclog_or_tty->stamp();
1516
gclog_or_tty->cr();
1517
stats().print_on(gclog_or_tty);
1518
gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1519
stats().time_until_cms_gen_full());
1520
gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
1521
gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
1522
_cmsGen->contiguous_available());
1523
gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1524
gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1525
gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1526
gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1527
gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1528
gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1529
gclog_or_tty->print_cr("metadata initialized %d",
1530
MetaspaceGC::should_concurrent_collect());
1531
}
1532
// ------------------------------------------------------------------
1533
1534
// If the estimated time to complete a cms collection (cms_duration())
1535
// is less than the estimated time remaining until the cms generation
1536
// is full, start a collection.
1537
if (!UseCMSInitiatingOccupancyOnly) {
1538
if (stats().valid()) {
1539
if (stats().time_until_cms_start() == 0.0) {
1540
return true;
1541
}
1542
} else {
1543
// We want to conservatively collect somewhat early in order
1544
// to try and "bootstrap" our CMS/promotion statistics;
1545
// this branch will not fire after the first successful CMS
1546
// collection because the stats should then be valid.
1547
if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1548
if (Verbose && PrintGCDetails) {
1549
gclog_or_tty->print_cr(
1550
" CMSCollector: collect for bootstrapping statistics:"
1551
" occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1552
_bootstrap_occupancy);
1553
}
1554
return true;
1555
}
1556
}
1557
}
1558
1559
  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
1562
// XXX We need to make sure that the gen expansion
1563
// criterion dovetails well with this. XXX NEED TO FIX THIS
1564
if (_cmsGen->should_concurrent_collect()) {
1565
if (Verbose && PrintGCDetails) {
1566
gclog_or_tty->print_cr("CMS old gen initiated");
1567
}
1568
return true;
1569
}
1570
1571
// We start a collection if we believe an incremental collection may fail;
1572
// this is not likely to be productive in practice because it's probably too
1573
// late anyway.
1574
GenCollectedHeap* gch = GenCollectedHeap::heap();
1575
assert(gch->collector_policy()->is_two_generation_policy(),
1576
"You may want to check the correctness of the following");
1577
if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1578
if (Verbose && PrintGCDetails) {
1579
gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1580
}
1581
return true;
1582
}
1583
1584
if (MetaspaceGC::should_concurrent_collect()) {
1585
if (Verbose && PrintGCDetails) {
1586
gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1587
}
1588
return true;
1589
}
1590
1591
// CMSTriggerInterval starts a CMS cycle if enough time has passed.
1592
if (CMSTriggerInterval >= 0) {
1593
if (CMSTriggerInterval == 0) {
1594
// Trigger always
1595
return true;
1596
}
1597
1598
// Check the CMS time since begin (we do not check the stats validity
1599
// as we want to be able to trigger the first CMS cycle as well)
1600
if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1601
if (Verbose && PrintGCDetails) {
1602
if (stats().valid()) {
1603
gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1604
stats().cms_time_since_begin());
1605
} else {
1606
gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1607
}
1608
}
1609
return true;
1610
}
1611
}
1612
1613
return false;
1614
}
1615
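// The CMSTriggerInterval check above compares stats().cms_time_since_begin(),
// measured in seconds, with CMSTriggerInterval, specified in milliseconds --
// hence the division by MILLIUNITS. A minimal sketch of that check follows;
// it is illustrative only, uses hypothetical local names, and assumes only
// that MILLIUNITS is 1000.
#if 0
#include <cstdio>

// CMSTriggerInterval is given in milliseconds; the statistics keep elapsed
// time in seconds, so the threshold is interval_ms / 1000.0.
static bool trigger_interval_elapsed(double time_since_begin_secs,
                                     long   cms_trigger_interval_ms) {
  if (cms_trigger_interval_ms < 0)  return false;  // feature disabled
  if (cms_trigger_interval_ms == 0) return true;   // trigger every time
  return time_since_begin_secs >= cms_trigger_interval_ms / 1000.0;
}

int main() {
  // E.g. with -XX:CMSTriggerInterval=600000 (ten minutes), a cycle that began
  // 650 seconds ago is eligible; one that began 300 seconds ago is not.
  std::printf("%d %d\n",
              trigger_interval_elapsed(650.0, 600000),    // prints 1
              trigger_interval_elapsed(300.0, 600000));   // prints 0
  return 0;
}
#endif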
1616
void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1617
1618
// Clear _expansion_cause fields of constituent generations
1619
void CMSCollector::clear_expansion_cause() {
1620
_cmsGen->clear_expansion_cause();
1621
}
1622
1623
// We should be conservative in starting a collection cycle. Starting
// too eagerly runs the risk of collecting too often in the
// extreme. Collecting too rarely falls back on full collections,
// which works, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly. This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1631
// We want to start a new collection cycle if any of the following
1632
// conditions hold:
1633
// . our current occupancy exceeds the configured initiating occupancy
1634
// for this generation, or
1635
// . we recently needed to expand this space and have not, since that
1636
// expansion, done a collection of this generation, or
1637
// . the underlying space believes that it may be a good idea to initiate
1638
// a concurrent collection (this may be based on criteria such as the
1639
// following: the space uses linear allocation and linear allocation is
1640
// going to fail, or there is believed to be excessive fragmentation in
1641
// the generation, etc... or ...
1642
// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1643
// the case of the old generation; see CR 6543076):
1644
// we may be approaching a point at which allocation requests may fail because
1645
// we will be out of sufficient free space given allocation rate estimates.]
1646
bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1647
1648
assert_lock_strong(freelistLock());
1649
if (occupancy() > initiating_occupancy()) {
1650
if (PrintGCDetails && Verbose) {
1651
gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1652
short_name(), occupancy(), initiating_occupancy());
1653
}
1654
return true;
1655
}
1656
if (UseCMSInitiatingOccupancyOnly) {
1657
return false;
1658
}
1659
if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1660
if (PrintGCDetails && Verbose) {
1661
gclog_or_tty->print(" %s: collect because expanded for allocation ",
1662
short_name());
1663
}
1664
return true;
1665
}
1666
if (_cmsSpace->should_concurrent_collect()) {
1667
if (PrintGCDetails && Verbose) {
1668
gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1669
short_name());
1670
}
1671
return true;
1672
}
1673
return false;
1674
}
1675
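// should_concurrent_collect() above compares the generation's occupancy
// (used()/capacity(), a fraction in [0,1]) with initiating_occupancy(); when
// UseCMSInitiatingOccupancyOnly is set this comparison is the only criterion.
// A minimal illustrative sketch follows, with hypothetical names; it assumes
// the threshold is derived from a percentage-style flag such as
// CMSInitiatingOccupancyFraction (the exact derivation lives elsewhere).
#if 0
#include <cstddef>

static bool occupancy_triggers_cycle(size_t used_bytes,
                                     size_t capacity_bytes,
                                     int    initiating_percent) {
  double occupancy = (double)used_bytes / (double)capacity_bytes;
  double threshold = initiating_percent / 100.0;
  return occupancy > threshold;   // mirrors occupancy() > initiating_occupancy()
}

// Example: a 1 GB old gen that is 768 MB full exceeds a 70% threshold, so a
// concurrent cycle would be started on this criterion alone.
static const bool example_trigger =
    occupancy_triggers_cycle(768u * 1024 * 1024, 1024u * 1024 * 1024, 70);
#endif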
1676
void ConcurrentMarkSweepGeneration::collect(bool full,
1677
bool clear_all_soft_refs,
1678
size_t size,
1679
bool tlab)
1680
{
1681
collector()->collect(full, clear_all_soft_refs, size, tlab);
1682
}
1683
1684
void CMSCollector::collect(bool full,
1685
bool clear_all_soft_refs,
1686
size_t size,
1687
bool tlab)
1688
{
1689
if (!UseCMSCollectionPassing && _collectorState > Idling) {
1690
// For debugging purposes skip the collection if the state
1691
// is not currently idle
1692
if (TraceCMSState) {
1693
gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1694
Thread::current(), full, _collectorState);
1695
}
1696
return;
1697
}
1698
1699
// The following "if" branch is present for defensive reasons.
1700
// In the current uses of this interface, it can be replaced with:
1701
// assert(!GC_locker.is_active(), "Can't be called otherwise");
1702
// But I am not placing that assert here to allow future
1703
// generality in invoking this interface.
1704
if (GC_locker::is_active()) {
1705
// A consistency test for GC_locker
1706
assert(GC_locker::needs_gc(), "Should have been set already");
1707
// Skip this foreground collection, instead
1708
// expanding the heap if necessary.
1709
// Need the free list locks for the call to free() in compute_new_size()
1710
compute_new_size();
1711
return;
1712
}
1713
acquire_control_and_collect(full, clear_all_soft_refs);
1714
_full_gcs_since_conc_gc++;
1715
}
1716
1717
void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1718
GenCollectedHeap* gch = GenCollectedHeap::heap();
1719
unsigned int gc_count = gch->total_full_collections();
1720
if (gc_count == full_gc_count) {
1721
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1722
_full_gc_requested = true;
1723
_full_gc_cause = cause;
1724
CGC_lock->notify(); // nudge CMS thread
1725
} else {
1726
assert(gc_count > full_gc_count, "Error: causal loop");
1727
}
1728
}
1729
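// request_full_gc() above uses the caller-supplied full_gc_count as a ticket:
// the request is recorded only if no full collection has started since the
// caller sampled the count, so a single user request cannot be acted on
// twice. A small illustrative sketch of that idiom under a plain mutex, with
// hypothetical names (the real code uses CGC_lock and notifies the CMS
// thread).
#if 0
#include <mutex>

struct FullGCRequestSketch {
  std::mutex   lock;
  unsigned int total_full_collections;
  bool         full_gc_requested;

  // The caller samples the collection counter, decides it wants a full GC,
  // and passes the sampled value back in. If the counter has moved on in the
  // meantime, some other collection already covered the request.
  void request(unsigned int count_seen_by_caller) {
    std::lock_guard<std::mutex> guard(lock);
    if (total_full_collections == count_seen_by_caller) {
      full_gc_requested = true;  // nobody has collected since the caller looked
      // ... nudge the collector thread here (CGC_lock->notify() in the real code)
    }
    // else: a collection has already started; drop the stale request
  }
};
#endif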
1730
bool CMSCollector::is_external_interruption() {
1731
GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1732
return GCCause::is_user_requested_gc(cause) ||
1733
GCCause::is_serviceability_requested_gc(cause);
1734
}
1735
1736
void CMSCollector::report_concurrent_mode_interruption() {
1737
if (is_external_interruption()) {
1738
if (PrintGCDetails) {
1739
gclog_or_tty->print(" (concurrent mode interrupted)");
1740
}
1741
} else {
1742
if (PrintGCDetails) {
1743
gclog_or_tty->print(" (concurrent mode failure)");
1744
}
1745
_gc_tracer_cm->report_concurrent_mode_failure();
1746
}
1747
}
1748
1749
1750
// The foreground and background collectors need to coordinate in order
1751
// to make sure that they do not mutually interfere with CMS collections.
1752
// When a background collection is active,
1753
// the foreground collector may need to take over (preempt) and
1754
// synchronously complete an ongoing collection. Depending on the
1755
// frequency of the background collections and the heap usage
1756
// of the application, this preemption can be seldom or frequent.
1757
// There are only certain
1758
// points in the background collection that the "collection-baton"
1759
// can be passed to the foreground collector.
1760
//
1761
// The foreground collector will wait for the baton before
1762
// starting any part of the collection. The foreground collector
1763
// will only wait at one location.
1764
//
1765
// The background collector will yield the baton before starting a new
1766
// phase of the collection (e.g., before initial marking, marking from roots,
1767
// precleaning, final re-mark, sweep etc.) This is normally done at the head
1768
// of the loop which switches the phases. The background collector does some
1769
// of the phases (initial mark, final re-mark) with the world stopped.
1770
// Because of locking involved in stopping the world,
1771
// the foreground collector should not block waiting for the background
1772
// collector when it is doing a stop-the-world phase. The background
1773
// collector will yield the baton at an additional point just before
1774
// it enters a stop-the-world phase. Once the world is stopped, the
1775
// background collector checks the phase of the collection. If the
1776
// phase has not changed, it proceeds with the collection. If the
1777
// phase has changed, it skips that phase of the collection. See
1778
// the comments on the use of the Heap_lock in collect_in_background().
1779
//
1780
// Variable used in baton passing.
1781
// _foregroundGCIsActive - Set to true by the foreground collector when
1782
// it wants the baton. The foreground clears it when it has finished
1783
// the collection.
1784
// _foregroundGCShouldWait - Set to true by the background collector
1785
// when it is running. The foreground collector waits while
1786
// _foregroundGCShouldWait is true.
1787
// CGC_lock - monitor used to protect access to the above variables
1788
// and to notify the foreground and background collectors.
1789
// _collectorState - current state of the CMS collection.
1790
//
1791
// The foreground collector
1792
// acquires the CGC_lock
1793
// sets _foregroundGCIsActive
1794
// waits on the CGC_lock for _foregroundGCShouldWait to be false
1795
// various locks acquired in preparation for the collection
1796
// are released so as not to block the background collector
1797
// that is in the midst of a collection
1798
// proceeds with the collection
1799
// clears _foregroundGCIsActive
1800
// returns
1801
//
1802
// The background collector in a loop iterating on the phases of the
1803
// collection
1804
// acquires the CGC_lock
1805
// sets _foregroundGCShouldWait
1806
// if _foregroundGCIsActive is set
1807
// clears _foregroundGCShouldWait, notifies _CGC_lock
1808
// waits on _CGC_lock for _foregroundGCIsActive to become false
1809
// and exits the loop.
1810
// otherwise
1811
// proceed with that phase of the collection
1812
// if the phase is a stop-the-world phase,
1813
// yield the baton once more just before enqueueing
1814
// the stop-world CMS operation (executed by the VM thread).
1815
// returns after all phases of the collection are done
1816
//
1817
1818
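// The protocol described above reduces to two flags guarded by one monitor:
// the foreground collector sets _foregroundGCIsActive and waits until
// _foregroundGCShouldWait is clear; the background collector sets
// _foregroundGCShouldWait while it runs a phase and, on seeing
// _foregroundGCIsActive, clears its flag, notifies, and waits for the
// foreground collection to finish. A compact illustrative sketch follows,
// using std::mutex/std::condition_variable in place of CGC_lock; the names
// are hypothetical and CMS-token handling is omitted.
#if 0
#include <condition_variable>
#include <mutex>

struct BatonSketch {
  std::mutex              cgc_lock;               // stands in for CGC_lock
  std::condition_variable cv;
  bool foreground_is_active   = false;            // _foregroundGCIsActive
  bool foreground_should_wait = false;            // _foregroundGCShouldWait

  // Foreground (VM thread at a safepoint): announce intent, then wait until
  // the background collector yields the baton.
  void foreground_wait_for_baton() {
    std::unique_lock<std::mutex> x(cgc_lock);
    foreground_is_active = true;
    cv.notify_all();                              // wake a parked CMS thread
    cv.wait(x, [this] { return !foreground_should_wait; });
  }

  // Foreground, after its collection has completed.
  void foreground_release_baton() {
    std::lock_guard<std::mutex> x(cgc_lock);
    foreground_is_active = false;
    cv.notify_all();
  }

  // Background (CMS thread), at the head of each phase: either keep the baton
  // or yield it and wait out the foreground collection. Returns true if it
  // yielded (mirrors waitForForegroundGC()).
  bool background_check_yield() {
    std::unique_lock<std::mutex> x(cgc_lock);
    foreground_should_wait = true;
    if (!foreground_is_active) {
      return false;                               // keep the baton; run the phase
    }
    foreground_should_wait = false;               // yield the baton
    cv.notify_all();
    cv.wait(x, [this] { return !foreground_is_active; });
    return true;                                  // foreground collection is done
  }
};
#endif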
void CMSCollector::acquire_control_and_collect(bool full,
1819
bool clear_all_soft_refs) {
1820
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1821
assert(!Thread::current()->is_ConcurrentGC_thread(),
1822
"shouldn't try to acquire control from self!");
1823
1824
// Start the protocol for acquiring control of the
1825
// collection from the background collector (aka CMS thread).
1826
assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1827
"VM thread should have CMS token");
1828
// Remember the possibly interrupted state of an ongoing
1829
// concurrent collection
1830
CollectorState first_state = _collectorState;
1831
1832
// Signal to a possibly ongoing concurrent collection that
1833
// we want to do a foreground collection.
1834
_foregroundGCIsActive = true;
1835
1836
// Disable incremental mode during a foreground collection.
1837
ICMSDisabler icms_disabler;
1838
1839
  // release locks and wait for a notify from the background collector
  // releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
1842
assert_lock_strong(bitMapLock());
1843
// We need to lock the Free list lock for the space that we are
1844
// currently collecting.
1845
assert(haveFreelistLocks(), "Must be holding free list locks");
1846
bitMapLock()->unlock();
1847
releaseFreelistLocks();
1848
{
1849
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1850
if (_foregroundGCShouldWait) {
1851
// We are going to be waiting for action for the CMS thread;
1852
// it had better not be gone (for instance at shutdown)!
1853
assert(ConcurrentMarkSweepThread::cmst() != NULL,
1854
"CMS thread must be running");
1855
// Wait here until the background collector gives us the go-ahead
1856
ConcurrentMarkSweepThread::clear_CMS_flag(
1857
ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1858
// Get a possibly blocked CMS thread going:
1859
// Note that we set _foregroundGCIsActive true above,
1860
// without protection of the CGC_lock.
1861
CGC_lock->notify();
1862
assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1863
"Possible deadlock");
1864
while (_foregroundGCShouldWait) {
1865
// wait for notification
1866
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1867
        // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1871
}
1872
ConcurrentMarkSweepThread::set_CMS_flag(
1873
ConcurrentMarkSweepThread::CMS_vm_has_token);
1874
}
1875
}
1876
// The CMS_token is already held. Get back the other locks.
1877
assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1878
"VM thread should have CMS token");
1879
getFreelistLocks();
1880
bitMapLock()->lock_without_safepoint_check();
1881
if (TraceCMSState) {
1882
gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1883
INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1884
gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1885
}
1886
1887
// Check if we need to do a compaction, or if not, whether
1888
// we need to start the mark-sweep from scratch.
1889
bool should_compact = false;
1890
bool should_start_over = false;
1891
decide_foreground_collection_type(clear_all_soft_refs,
1892
&should_compact, &should_start_over);
1893
1894
NOT_PRODUCT(
1895
if (RotateCMSCollectionTypes) {
1896
if (_cmsGen->debug_collection_type() ==
1897
ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1898
should_compact = true;
1899
} else if (_cmsGen->debug_collection_type() ==
1900
ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1901
should_compact = false;
1902
}
1903
}
1904
)
1905
1906
if (first_state > Idling) {
1907
report_concurrent_mode_interruption();
1908
}
1909
1910
set_did_compact(should_compact);
1911
if (should_compact) {
1912
// If the collection is being acquired from the background
1913
// collector, there may be references on the discovered
1914
// references lists that have NULL referents (being those
1915
// that were concurrently cleared by a mutator) or
1916
// that are no longer active (having been enqueued concurrently
1917
// by the mutator).
1918
// Scrub the list of those references because Mark-Sweep-Compact
1919
// code assumes referents are not NULL and that all discovered
1920
// Reference objects are active.
1921
ref_processor()->clean_up_discovered_references();
1922
1923
if (first_state > Idling) {
1924
save_heap_summary();
1925
}
1926
1927
do_compaction_work(clear_all_soft_refs);
1928
1929
// Has the GC time limit been exceeded?
1930
DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1931
size_t max_eden_size = young_gen->max_eden_size();
1932
GenCollectedHeap* gch = GenCollectedHeap::heap();
1933
GCCause::Cause gc_cause = gch->gc_cause();
1934
size_policy()->check_gc_overhead_limit(_young_gen->used(),
1935
young_gen->eden()->used(),
1936
_cmsGen->max_capacity(),
1937
max_eden_size,
1938
full,
1939
gc_cause,
1940
gch->collector_policy());
1941
} else {
1942
do_mark_sweep_work(clear_all_soft_refs, first_state,
1943
should_start_over);
1944
}
1945
// Reset the expansion cause, now that we just completed
1946
// a collection cycle.
1947
clear_expansion_cause();
1948
_foregroundGCIsActive = false;
1949
return;
1950
}
1951
1952
// Resize the tenured generation
1953
// after obtaining the free list locks for the
1954
// two generations.
1955
void CMSCollector::compute_new_size() {
1956
assert_locked_or_safepoint(Heap_lock);
1957
FreelistLocker z(this);
1958
MetaspaceGC::compute_new_size();
1959
_cmsGen->compute_new_size_free_list();
1960
// recalculate CMS used space after CMS collection
1961
_cmsGen->cmsSpace()->recalculate_used_stable();
1962
}
1963
1964
// A work method used by foreground collection to determine
1965
// what type of collection (compacting or not, continuing or fresh)
1966
// it should do.
1967
// NOTE: the intent is to make UseCMSCompactAtFullCollection
1968
// and CMSCompactWhenClearAllSoftRefs the default in the future
1969
// and do away with the flags after a suitable period.
1970
void CMSCollector::decide_foreground_collection_type(
1971
bool clear_all_soft_refs, bool* should_compact,
1972
bool* should_start_over) {
1973
// Normally, we'll compact only if the UseCMSCompactAtFullCollection
1974
// flag is set, and we have either requested a System.gc() or
1975
// the number of full gc's since the last concurrent cycle
1976
// has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1977
// or if an incremental collection has failed
1978
GenCollectedHeap* gch = GenCollectedHeap::heap();
1979
assert(gch->collector_policy()->is_two_generation_policy(),
1980
"You may want to check the correctness of the following");
1981
// Inform cms gen if this was due to partial collection failing.
1982
// The CMS gen may use this fact to determine its expansion policy.
1983
if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1984
assert(!_cmsGen->incremental_collection_failed(),
1985
"Should have been noticed, reacted to and cleared");
1986
_cmsGen->set_incremental_collection_failed();
1987
}
1988
*should_compact =
1989
UseCMSCompactAtFullCollection &&
1990
((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1991
GCCause::is_user_requested_gc(gch->gc_cause()) ||
1992
gch->incremental_collection_will_fail(true /* consult_young */));
1993
*should_start_over = false;
1994
if (clear_all_soft_refs && !*should_compact) {
1995
// We are about to do a last ditch collection attempt
1996
// so it would normally make sense to do a compaction
1997
// to reclaim as much space as possible.
1998
if (CMSCompactWhenClearAllSoftRefs) {
1999
// Default: The rationale is that in this case either
2000
// we are past the final marking phase, in which case
2001
// we'd have to start over, or so little has been done
2002
// that there's little point in saving that work. Compaction
2003
// appears to be the sensible choice in either case.
2004
*should_compact = true;
2005
} else {
2006
// We have been asked to clear all soft refs, but not to
2007
// compact. Make sure that we aren't past the final checkpoint
2008
// phase, for that is where we process soft refs. If we are already
2009
// past that phase, we'll need to redo the refs discovery phase and
2010
// if necessary clear soft refs that weren't previously
2011
// cleared. We do so by remembering the phase in which
2012
// we came in, and if we are past the refs processing
2013
// phase, we'll choose to just redo the mark-sweep
2014
// collection from scratch.
2015
if (_collectorState > FinalMarking) {
2016
// We are past the refs processing phase;
2017
// start over and do a fresh synchronous CMS cycle
2018
_collectorState = Resetting; // skip to reset to start new cycle
2019
reset(false /* == !asynch */);
2020
*should_start_over = true;
2021
} // else we can continue a possibly ongoing current cycle
2022
}
2023
}
2024
}
2025
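// The compaction decision above is a single predicate: compact only if
// UseCMSCompactAtFullCollection is set and at least one of (enough full GCs
// since the last concurrent cycle, an explicit user request, or an expected
// incremental-collection failure) holds; CMSCompactWhenClearAllSoftRefs can
// force it as well. A small illustrative sketch of the *should_compact part,
// with hypothetical parameter names in place of flags and heap state.
#if 0
static bool should_compact_sketch(bool     use_cms_compact_at_full_collection,
                                  unsigned full_gcs_since_conc_gc,
                                  unsigned cms_full_gcs_before_compaction,
                                  bool     user_requested_gc,
                                  bool     incremental_collection_will_fail) {
  return use_cms_compact_at_full_collection &&
         ((full_gcs_since_conc_gc >= cms_full_gcs_before_compaction) ||
          user_requested_gc ||
          incremental_collection_will_fail);
}
#endif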
2026
// A work method used by the foreground collector to do
2027
// a mark-sweep-compact.
2028
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
2029
GenCollectedHeap* gch = GenCollectedHeap::heap();
2030
2031
STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2032
gc_timer->register_gc_start();
2033
2034
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2035
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2036
2037
GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
2038
if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2039
gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2040
"collections passed to foreground collector", _full_gcs_since_conc_gc);
2041
}
2042
2043
// Sample collection interval time and reset for collection pause.
2044
if (UseAdaptiveSizePolicy) {
2045
size_policy()->msc_collection_begin();
2046
}
2047
2048
// Temporarily widen the span of the weak reference processing to
2049
// the entire heap.
2050
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2051
ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2052
// Temporarily, clear the "is_alive_non_header" field of the
2053
// reference processor.
2054
ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2055
// Temporarily make reference _processing_ single threaded (non-MT).
2056
ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2057
// Temporarily make refs discovery atomic
2058
ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2059
// Temporarily make reference _discovery_ single threaded (non-MT)
2060
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2061
2062
ref_processor()->set_enqueuing_is_done(false);
2063
ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2064
ref_processor()->setup_policy(clear_all_soft_refs);
2065
// If an asynchronous collection finishes, the _modUnionTable is
2066
// all clear. If we are assuming the collection from an asynchronous
2067
// collection, clear the _modUnionTable.
2068
assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2069
"_modUnionTable should be clear if the baton was not passed");
2070
_modUnionTable.clear_all();
2071
  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
         "mod union for klasses should be clear if the baton was not passed");
2073
_ct->klass_rem_set()->clear_mod_union();
2074
2075
// We must adjust the allocation statistics being maintained
2076
// in the free list space. We do so by reading and clearing
2077
// the sweep timer and updating the block flux rate estimates below.
2078
assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2079
if (_inter_sweep_timer.is_active()) {
2080
_inter_sweep_timer.stop();
2081
// Note that we do not use this sample to update the _inter_sweep_estimate.
2082
_cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2083
_inter_sweep_estimate.padded_average(),
2084
_intra_sweep_estimate.padded_average());
2085
}
2086
2087
GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2088
ref_processor(), clear_all_soft_refs);
2089
#ifdef ASSERT
2090
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2091
size_t free_size = cms_space->free();
2092
assert(free_size ==
2093
pointer_delta(cms_space->end(), cms_space->compaction_top())
2094
* HeapWordSize,
2095
"All the free space should be compacted into one chunk at top");
2096
assert(cms_space->dictionary()->total_chunk_size(
2097
debug_only(cms_space->freelistLock())) == 0 ||
2098
cms_space->totalSizeInIndexedFreeLists() == 0,
2099
"All the free space should be in a single chunk");
2100
size_t num = cms_space->totalCount();
2101
assert((free_size == 0 && num == 0) ||
2102
(free_size > 0 && (num == 1 || num == 2)),
2103
"There should be at most 2 free chunks after compaction");
2104
#endif // ASSERT
2105
_collectorState = Resetting;
2106
assert(_restart_addr == NULL,
2107
"Should have been NULL'd before baton was passed");
2108
reset(false /* == !asynch */);
2109
_cmsGen->reset_after_compaction();
2110
_concurrent_cycles_since_last_unload = 0;
2111
2112
// Clear any data recorded in the PLAB chunk arrays.
2113
if (_survivor_plab_array != NULL) {
2114
reset_survivor_plab_arrays();
2115
}
2116
2117
// Adjust the per-size allocation stats for the next epoch.
2118
_cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2119
// Restart the "inter sweep timer" for the next epoch.
2120
_inter_sweep_timer.reset();
2121
_inter_sweep_timer.start();
2122
2123
// Sample collection pause time and reset for collection interval.
2124
if (UseAdaptiveSizePolicy) {
2125
size_policy()->msc_collection_end(gch->gc_cause());
2126
}
2127
2128
gc_timer->register_gc_end();
2129
2130
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2131
2132
// For a mark-sweep-compact, compute_new_size() will be called
2133
// in the heap's do_collection() method.
2134
}
2135
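// do_compaction_work() above leans on a series of ReferenceProcessor*Mutator
// stack objects: each one overrides one reference-processor setting for the
// duration of the compaction and restores the previous value when it goes out
// of scope, even on early returns. A generic illustrative sketch of that
// save-in-constructor / restore-in-destructor pattern; ScopedSettingSketch is
// a hypothetical name, not a HotSpot class.
#if 0
template <typename T>
class ScopedSettingSketch {
  T* _slot;   // the setting being overridden
  T  _saved;  // its value on entry
 public:
  ScopedSettingSketch(T* slot, T temporary_value)
      : _slot(slot), _saved(*slot) {
    *_slot = temporary_value;      // install the temporary value
  }
  ~ScopedSettingSketch() {
    *_slot = _saved;               // undo the override when the scope ends
  }
};

// Usage, analogous to the mutators in do_compaction_work():
//   bool mt_processing = true;
//   {
//     ScopedSettingSketch<bool> x(&mt_processing, false);  // single-threaded here
//     ...
//   }                                                      // restored to true
#endif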
2136
// A work method used by the foreground collector to do
2137
// a mark-sweep, after taking over from a possibly on-going
2138
// concurrent mark-sweep collection.
2139
void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2140
CollectorState first_state, bool should_start_over) {
2141
if (PrintGC && Verbose) {
2142
gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2143
"collector with count %d",
2144
_full_gcs_since_conc_gc);
2145
}
2146
switch (_collectorState) {
2147
case Idling:
2148
if (first_state == Idling || should_start_over) {
2149
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
2151
_collectorState = InitialMarking;
2152
}
2153
// If first_state was not Idling, then a background GC
2154
// was in progress and has now finished. No need to do it
2155
// again. Leave the state as Idling.
2156
break;
2157
case Precleaning:
2158
// In the foreground case don't do the precleaning since
2159
// it is not done concurrently and there is extra work
2160
// required.
2161
_collectorState = FinalMarking;
2162
}
2163
collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2164
2165
// For a mark-sweep, compute_new_size() will be called
2166
// in the heap's do_collection() method.
2167
}
2168
2169
2170
void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2171
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2172
EdenSpace* eden_space = dng->eden();
2173
ContiguousSpace* from_space = dng->from();
2174
ContiguousSpace* to_space = dng->to();
2175
// Eden
2176
if (_eden_chunk_array != NULL) {
2177
gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2178
eden_space->bottom(), eden_space->top(),
2179
eden_space->end(), eden_space->capacity());
2180
gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2181
"_eden_chunk_capacity=" SIZE_FORMAT,
2182
_eden_chunk_index, _eden_chunk_capacity);
2183
for (size_t i = 0; i < _eden_chunk_index; i++) {
2184
gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2185
i, _eden_chunk_array[i]);
2186
}
2187
}
2188
// Survivor
2189
if (_survivor_chunk_array != NULL) {
2190
gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2191
from_space->bottom(), from_space->top(),
2192
from_space->end(), from_space->capacity());
2193
gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2194
"_survivor_chunk_capacity=" SIZE_FORMAT,
2195
_survivor_chunk_index, _survivor_chunk_capacity);
2196
for (size_t i = 0; i < _survivor_chunk_index; i++) {
2197
gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2198
i, _survivor_chunk_array[i]);
2199
}
2200
}
2201
}
2202
2203
void CMSCollector::getFreelistLocks() const {
2204
// Get locks for all free lists in all generations that this
2205
// collector is responsible for
2206
_cmsGen->freelistLock()->lock_without_safepoint_check();
2207
}
2208
2209
void CMSCollector::releaseFreelistLocks() const {
2210
// Release locks for all free lists in all generations that this
2211
// collector is responsible for
2212
_cmsGen->freelistLock()->unlock();
2213
}
2214
2215
bool CMSCollector::haveFreelistLocks() const {
2216
// Check locks for all free lists in all generations that this
2217
// collector is responsible for
2218
assert_lock_strong(_cmsGen->freelistLock());
2219
PRODUCT_ONLY(ShouldNotReachHere());
2220
return true;
2221
}
2222
2223
// A utility class that is used by the CMS collector to
2224
// temporarily "release" the foreground collector from its
2225
// usual obligation to wait for the background collector to
2226
// complete an ongoing phase before proceeding.
2227
class ReleaseForegroundGC: public StackObj {
2228
private:
2229
CMSCollector* _c;
2230
public:
2231
ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2232
assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2233
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2234
// allow a potentially blocked foreground collector to proceed
2235
_c->_foregroundGCShouldWait = false;
2236
if (_c->_foregroundGCIsActive) {
2237
CGC_lock->notify();
2238
}
2239
assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2240
"Possible deadlock");
2241
}
2242
2243
~ReleaseForegroundGC() {
2244
assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2245
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2246
_c->_foregroundGCShouldWait = true;
2247
}
2248
};
2249
2250
// There are separate collect_in_background and collect_in_foreground because of
2251
// the different locking requirements of the background collector and the
2252
// foreground collector. There was originally an attempt to share
2253
// one "collect" method between the background collector and the foreground
2254
// collector but the if-then-else required made it cleaner to have
2255
// separate methods.
2256
void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2257
assert(Thread::current()->is_ConcurrentGC_thread(),
2258
"A CMS asynchronous collection is only allowed on a CMS thread.");
2259
2260
GenCollectedHeap* gch = GenCollectedHeap::heap();
2261
{
2262
bool safepoint_check = Mutex::_no_safepoint_check_flag;
2263
MutexLockerEx hl(Heap_lock, safepoint_check);
2264
FreelistLocker fll(this);
2265
MutexLockerEx x(CGC_lock, safepoint_check);
2266
if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2267
// The foreground collector is active or we're
2268
// not using asynchronous collections. Skip this
2269
// background collection.
2270
assert(!_foregroundGCShouldWait, "Should be clear");
2271
return;
2272
} else {
2273
assert(_collectorState == Idling, "Should be idling before start.");
2274
_collectorState = InitialMarking;
2275
register_gc_start(cause);
2276
// Reset the expansion cause, now that we are about to begin
2277
// a new cycle.
2278
clear_expansion_cause();
2279
2280
// Clear the MetaspaceGC flag since a concurrent collection
2281
// is starting but also clear it after the collection.
2282
MetaspaceGC::set_should_concurrent_collect(false);
2283
}
2284
// Decide if we want to enable class unloading as part of the
2285
// ensuing concurrent GC cycle.
2286
update_should_unload_classes();
2287
_full_gc_requested = false; // acks all outstanding full gc requests
2288
_full_gc_cause = GCCause::_no_gc;
2289
// Signal that we are about to start a collection
2290
gch->increment_total_full_collections(); // ... starting a collection cycle
2291
_collection_count_start = gch->total_full_collections();
2292
}
2293
2294
// Used for PrintGC
2295
size_t prev_used = 0;
2296
if (PrintGC && Verbose) {
2297
prev_used = _cmsGen->used(); // XXXPERM
2298
}
2299
2300
// The change of the collection state is normally done at this level;
2301
// the exceptions are phases that are executed while the world is
2302
// stopped. For those phases the change of state is done while the
2303
// world is stopped. For baton passing purposes this allows the
2304
// background collector to finish the phase and change state atomically.
2305
// The foreground collector cannot wait on a phase that is done
2306
// while the world is stopped because the foreground collector already
2307
// has the world stopped and would deadlock.
2308
while (_collectorState != Idling) {
2309
if (TraceCMSState) {
2310
gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2311
Thread::current(), _collectorState);
2312
}
2313
// The foreground collector
2314
// holds the Heap_lock throughout its collection.
2315
// holds the CMS token (but not the lock)
2316
// except while it is waiting for the background collector to yield.
2317
//
2318
// The foreground collector should be blocked (not for long)
2319
// if the background collector is about to start a phase
2320
// executed with world stopped. If the background
2321
// collector has already started such a phase, the
2322
// foreground collector is blocked waiting for the
2323
// Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2324
// are executed in the VM thread.
2325
//
2326
// The locking order is
2327
// PendingListLock (PLL) -- if applicable (FinalMarking)
2328
// Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2329
// CMS token (claimed in
2330
// stop_world_and_do() -->
2331
// safepoint_synchronize() -->
2332
// CMSThread::synchronize())
2333
2334
{
2335
// Check if the FG collector wants us to yield.
2336
CMSTokenSync x(true); // is cms thread
2337
if (waitForForegroundGC()) {
2338
// We yielded to a foreground GC, nothing more to be
2339
// done this round.
2340
assert(_foregroundGCShouldWait == false, "We set it to false in "
2341
"waitForForegroundGC()");
2342
if (TraceCMSState) {
2343
gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2344
" exiting collection CMS state %d",
2345
Thread::current(), _collectorState);
2346
}
2347
return;
2348
} else {
2349
// The background collector can run but check to see if the
2350
// foreground collector has done a collection while the
2351
// background collector was waiting to get the CGC_lock
2352
// above. If yes, break so that _foregroundGCShouldWait
2353
// is cleared before returning.
2354
if (_collectorState == Idling) {
2355
break;
2356
}
2357
}
2358
}
2359
2360
assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2361
"should be waiting");
2362
2363
switch (_collectorState) {
2364
case InitialMarking:
2365
{
2366
ReleaseForegroundGC x(this);
2367
stats().record_cms_begin();
2368
VM_CMS_Initial_Mark initial_mark_op(this);
2369
VMThread::execute(&initial_mark_op);
2370
}
2371
// The collector state may be any legal state at this point
2372
// since the background collector may have yielded to the
2373
// foreground collector.
2374
break;
2375
case Marking:
2376
// initial marking in checkpointRootsInitialWork has been completed
2377
if (markFromRoots(true)) { // we were successful
2378
assert(_collectorState == Precleaning, "Collector state should "
2379
"have changed");
2380
} else {
2381
assert(_foregroundGCIsActive, "Internal state inconsistency");
2382
}
2383
break;
2384
case Precleaning:
2385
if (UseAdaptiveSizePolicy) {
2386
size_policy()->concurrent_precleaning_begin();
2387
}
2388
// marking from roots in markFromRoots has been completed
2389
preclean();
2390
if (UseAdaptiveSizePolicy) {
2391
size_policy()->concurrent_precleaning_end();
2392
}
2393
assert(_collectorState == AbortablePreclean ||
2394
_collectorState == FinalMarking,
2395
"Collector state should have changed");
2396
break;
2397
case AbortablePreclean:
2398
if (UseAdaptiveSizePolicy) {
2399
size_policy()->concurrent_phases_resume();
2400
}
2401
abortable_preclean();
2402
if (UseAdaptiveSizePolicy) {
2403
size_policy()->concurrent_precleaning_end();
2404
}
2405
assert(_collectorState == FinalMarking, "Collector state should "
2406
"have changed");
2407
break;
2408
case FinalMarking:
2409
{
2410
ReleaseForegroundGC x(this);
2411
2412
VM_CMS_Final_Remark final_remark_op(this);
2413
VMThread::execute(&final_remark_op);
2414
}
2415
assert(_foregroundGCShouldWait, "block post-condition");
2416
break;
2417
case Sweeping:
2418
if (UseAdaptiveSizePolicy) {
2419
size_policy()->concurrent_sweeping_begin();
2420
}
2421
// final marking in checkpointRootsFinal has been completed
2422
sweep(true);
2423
assert(_collectorState == Resizing, "Collector state change "
2424
"to Resizing must be done under the free_list_lock");
2425
_full_gcs_since_conc_gc = 0;
2426
2427
// Stop the timers for adaptive size policy for the concurrent phases
2428
if (UseAdaptiveSizePolicy) {
2429
size_policy()->concurrent_sweeping_end();
2430
size_policy()->concurrent_phases_end(gch->gc_cause(),
2431
gch->prev_gen(_cmsGen)->capacity(),
2432
_cmsGen->free());
2433
}
2434
2435
case Resizing: {
2436
// Sweeping has been completed...
2437
// At this point the background collection has completed.
2438
// Don't move the call to compute_new_size() down
2439
// into code that might be executed if the background
2440
// collection was preempted.
2441
{
2442
ReleaseForegroundGC x(this); // unblock FG collection
2443
MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2444
CMSTokenSync z(true); // not strictly needed.
2445
if (_collectorState == Resizing) {
2446
compute_new_size();
2447
save_heap_summary();
2448
_collectorState = Resetting;
2449
} else {
2450
assert(_collectorState == Idling, "The state should only change"
2451
" because the foreground collector has finished the collection");
2452
}
2453
}
2454
break;
2455
}
2456
case Resetting:
2457
// CMS heap resizing has been completed
2458
reset(true);
2459
assert(_collectorState == Idling, "Collector state should "
2460
"have changed");
2461
2462
MetaspaceGC::set_should_concurrent_collect(false);
2463
2464
stats().record_cms_end();
2465
        // Don't move the concurrent_phases_end() and compute_new_size()
        // calls to here because a preempted background collection
        // has its state set to "Resetting".
2468
break;
2469
case Idling:
2470
default:
2471
ShouldNotReachHere();
2472
break;
2473
}
2474
if (TraceCMSState) {
2475
gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2476
Thread::current(), _collectorState);
2477
}
2478
assert(_foregroundGCShouldWait, "block post-condition");
2479
}
2480
2481
// Should this be in gc_epilogue?
2482
collector_policy()->counters()->update_counters();
2483
2484
{
2485
// Clear _foregroundGCShouldWait and, in the event that the
2486
// foreground collector is waiting, notify it, before
2487
// returning.
2488
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2489
_foregroundGCShouldWait = false;
2490
if (_foregroundGCIsActive) {
2491
CGC_lock->notify();
2492
}
2493
assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2494
"Possible deadlock");
2495
}
2496
if (TraceCMSState) {
2497
gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2498
" exiting collection CMS state %d",
2499
Thread::current(), _collectorState);
2500
}
2501
if (PrintGC && Verbose) {
2502
_cmsGen->print_heap_change(prev_used);
2503
}
2504
}
2505
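// The loop above re-checks the baton each iteration and then dispatches on
// _collectorState, advancing the background cycle through its phases in a
// fixed order (Precleaning may jump straight to FinalMarking, and the
// foreground path in collect_in_foreground() elides the preclean phases
// entirely). An illustrative sketch of that ordering, with hypothetical
// enumerator names shadowing the states used in the switch statements above.
#if 0
// InitialMarking -> Marking -> Precleaning -> AbortablePreclean
//   -> FinalMarking -> Sweeping -> Resizing -> Resetting -> Idling
enum CollectorStateSketch {
  kIdling, kInitialMarking, kMarking, kPrecleaning, kAbortablePreclean,
  kFinalMarking, kSweeping, kResizing, kResetting
};

static CollectorStateSketch next_background_state(CollectorStateSketch s) {
  switch (s) {
    case kInitialMarking:    return kMarking;
    case kMarking:           return kPrecleaning;
    case kPrecleaning:       return kAbortablePreclean;  // or kFinalMarking
    case kAbortablePreclean: return kFinalMarking;
    case kFinalMarking:      return kSweeping;
    case kSweeping:          return kResizing;
    case kResizing:          return kResetting;
    case kResetting:         return kIdling;
    default:                 return kIdling;             // Idling: nothing to do
  }
}
#endif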
2506
void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2507
if (!_cms_start_registered) {
2508
register_gc_start(cause);
2509
}
2510
}
2511
2512
void CMSCollector::register_gc_start(GCCause::Cause cause) {
2513
_cms_start_registered = true;
2514
_gc_timer_cm->register_gc_start();
2515
_gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2516
}
2517
2518
void CMSCollector::register_gc_end() {
2519
if (_cms_start_registered) {
2520
report_heap_summary(GCWhen::AfterGC);
2521
2522
_gc_timer_cm->register_gc_end();
2523
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2524
_cms_start_registered = false;
2525
}
2526
}
2527
2528
void CMSCollector::save_heap_summary() {
2529
GenCollectedHeap* gch = GenCollectedHeap::heap();
2530
_last_heap_summary = gch->create_heap_summary();
2531
_last_metaspace_summary = gch->create_metaspace_summary();
2532
}
2533
2534
void CMSCollector::report_heap_summary(GCWhen::Type when) {
2535
_gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2536
_gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2537
}
2538
2539
void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2540
assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2541
"Foreground collector should be waiting, not executing");
2542
assert(Thread::current()->is_VM_thread(), "A foreground collection"
2543
"may only be done by the VM Thread with the world stopped");
2544
assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2545
"VM thread should have CMS token");
2546
2547
// The gc id is created in register_foreground_gc_start if this collection is synchronous
2548
const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
2549
NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2550
true, NULL, gc_id);)
2551
if (UseAdaptiveSizePolicy) {
2552
size_policy()->ms_collection_begin();
2553
}
2554
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2555
2556
HandleMark hm; // Discard invalid handles created during verification
2557
2558
if (VerifyBeforeGC &&
2559
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2560
Universe::verify();
2561
}
2562
2563
// Snapshot the soft reference policy to be used in this collection cycle.
2564
ref_processor()->setup_policy(clear_all_soft_refs);
2565
2566
// Decide if class unloading should be done
2567
update_should_unload_classes();
2568
2569
bool init_mark_was_synchronous = false; // until proven otherwise
2570
while (_collectorState != Idling) {
2571
if (TraceCMSState) {
2572
gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2573
Thread::current(), _collectorState);
2574
}
2575
switch (_collectorState) {
2576
case InitialMarking:
2577
register_foreground_gc_start(cause);
2578
init_mark_was_synchronous = true; // fact to be exploited in re-mark
2579
checkpointRootsInitial(false);
2580
assert(_collectorState == Marking, "Collector state should have changed"
2581
" within checkpointRootsInitial()");
2582
break;
2583
case Marking:
2584
// initial marking in checkpointRootsInitialWork has been completed
2585
if (VerifyDuringGC &&
2586
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2587
Universe::verify("Verify before initial mark: ");
2588
}
2589
{
2590
bool res = markFromRoots(false);
2591
assert(res && _collectorState == FinalMarking, "Collector state should "
2592
"have changed");
2593
break;
2594
}
2595
case FinalMarking:
2596
if (VerifyDuringGC &&
2597
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2598
Universe::verify("Verify before re-mark: ");
2599
}
2600
checkpointRootsFinal(false, clear_all_soft_refs,
2601
init_mark_was_synchronous);
2602
assert(_collectorState == Sweeping, "Collector state should not "
2603
"have changed within checkpointRootsFinal()");
2604
break;
2605
case Sweeping:
2606
// final marking in checkpointRootsFinal has been completed
2607
if (VerifyDuringGC &&
2608
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2609
Universe::verify("Verify before sweep: ");
2610
}
2611
sweep(false);
2612
assert(_collectorState == Resizing, "Incorrect state");
2613
break;
2614
case Resizing: {
2615
// Sweeping has been completed; the actual resize in this case
2616
// is done separately; nothing to be done in this state.
2617
_collectorState = Resetting;
2618
break;
2619
}
2620
case Resetting:
2621
// The heap has been resized.
2622
if (VerifyDuringGC &&
2623
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2624
Universe::verify("Verify before reset: ");
2625
}
2626
save_heap_summary();
2627
reset(false);
2628
assert(_collectorState == Idling, "Collector state should "
2629
"have changed");
2630
break;
2631
case Precleaning:
2632
case AbortablePreclean:
2633
// Elide the preclean phase
2634
_collectorState = FinalMarking;
2635
break;
2636
default:
2637
ShouldNotReachHere();
2638
}
2639
if (TraceCMSState) {
2640
gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2641
Thread::current(), _collectorState);
2642
}
2643
}
2644
2645
if (UseAdaptiveSizePolicy) {
2646
GenCollectedHeap* gch = GenCollectedHeap::heap();
2647
size_policy()->ms_collection_end(gch->gc_cause());
2648
}
2649
2650
if (VerifyAfterGC &&
2651
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2652
Universe::verify();
2653
}
2654
if (TraceCMSState) {
2655
gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2656
" exiting collection CMS state %d",
2657
Thread::current(), _collectorState);
2658
}
2659
}
2660
2661
bool CMSCollector::waitForForegroundGC() {
2662
bool res = false;
2663
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2664
"CMS thread should have CMS token");
2665
  // Block the foreground collector until the
  // background collector decides whether to
  // yield.
2668
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2669
_foregroundGCShouldWait = true;
2670
if (_foregroundGCIsActive) {
2671
// The background collector yields to the
2672
// foreground collector and returns a value
2673
// indicating that it has yielded. The foreground
2674
// collector can proceed.
2675
res = true;
2676
_foregroundGCShouldWait = false;
2677
ConcurrentMarkSweepThread::clear_CMS_flag(
2678
ConcurrentMarkSweepThread::CMS_cms_has_token);
2679
ConcurrentMarkSweepThread::set_CMS_flag(
2680
ConcurrentMarkSweepThread::CMS_cms_wants_token);
2681
// Get a possibly blocked foreground thread going
2682
CGC_lock->notify();
2683
if (TraceCMSState) {
2684
gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2685
Thread::current(), _collectorState);
2686
}
2687
while (_foregroundGCIsActive) {
2688
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2689
}
2690
ConcurrentMarkSweepThread::set_CMS_flag(
2691
ConcurrentMarkSweepThread::CMS_cms_has_token);
2692
ConcurrentMarkSweepThread::clear_CMS_flag(
2693
ConcurrentMarkSweepThread::CMS_cms_wants_token);
2694
}
2695
if (TraceCMSState) {
2696
gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2697
Thread::current(), _collectorState);
2698
}
2699
return res;
2700
}
2701
2702
// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector. In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()s.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
2715
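// The _between_prologue_and_epilogue flag described above is a plain
// reentrancy guard: the first delegated gc_prologue() does the shared work
// and sets the flag, later delegations from other CMS generations return
// immediately, and gc_epilogue() clears it for the next cycle. A minimal
// illustrative sketch of that guard, with hypothetical names.
#if 0
struct PrologueEpilogueGuardSketch {
  bool between_prologue_and_epilogue = false;

  void gc_prologue() {
    if (between_prologue_and_epilogue) {
      return;                                // already done for this GC
    }
    between_prologue_and_epilogue = true;
    // ... claim shared locks, call per-generation gc_prologue_work(), etc.
  }

  void gc_epilogue() {
    if (!between_prologue_and_epilogue) {
      return;                                // a later delegation; nothing to do
    }
    // ... per-generation gc_epilogue_work(), release shared locks, etc.
    between_prologue_and_epilogue = false;   // ready for the next cycle
  }
};
#endif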
void CMSCollector::gc_prologue(bool full) {
2716
// Call gc_prologue_work() for the CMSGen
2717
// we are responsible for.
2718
2719
// The following locking discipline assumes that we are only called
2720
// when the world is stopped.
2721
assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2722
2723
// The CMSCollector prologue must call the gc_prologues for the
2724
// "generations" that it's responsible
2725
// for.
2726
2727
assert( Thread::current()->is_VM_thread()
2728
|| ( CMSScavengeBeforeRemark
2729
&& Thread::current()->is_ConcurrentGC_thread()),
2730
"Incorrect thread type for prologue execution");
2731
2732
if (_between_prologue_and_epilogue) {
2733
// We have already been invoked; this is a gc_prologue delegation
2734
// from yet another CMS generation that we are responsible for, just
2735
// ignore it since all relevant work has already been done.
2736
return;
2737
}
2738
2739
// set a bit saying prologue has been called; cleared in epilogue
2740
_between_prologue_and_epilogue = true;
2741
// Claim locks for common data structures, then call gc_prologue_work()
2742
// for each CMSGen.
2743
2744
getFreelistLocks(); // gets free list locks on constituent spaces
2745
bitMapLock()->lock_without_safepoint_check();
2746
2747
// Should call gc_prologue_work() for all cms gens we are responsible for
2748
bool duringMarking = _collectorState >= Marking
2749
&& _collectorState < Sweeping;
2750
2751
// The young collections clear the modified oops state, which tells if
2752
// there are any modified oops in the class. The remark phase also needs
2753
// that information. Tell the young collection to save the union of all
2754
// modified klasses.
2755
if (duringMarking) {
2756
_ct->klass_rem_set()->set_accumulate_modified_oops(true);
2757
}
2758
2759
bool registerClosure = duringMarking;
2760
2761
ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2762
&_modUnionClosurePar
2763
: &_modUnionClosure;
2764
_cmsGen->gc_prologue_work(full, registerClosure, muc);
2765
2766
if (!full) {
2767
stats().record_gc0_begin();
2768
}
2769
}
2770
2771
void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2772
2773
_capacity_at_prologue = capacity();
2774
_used_at_prologue = used();
2775
_cmsSpace->recalculate_used_stable();
2776
2777
// Delegate to CMScollector which knows how to coordinate between
2778
// this and any other CMS generations that it is responsible for
2779
// collecting.
2780
collector()->gc_prologue(full);
2781
}
2782
2783
// This is a "private" interface for use by this generation's CMSCollector.
2784
// Not to be called directly by any other entity (for instance,
2785
// GenCollectedHeap, which calls the "public" gc_prologue method above).
2786
void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2787
bool registerClosure, ModUnionClosure* modUnionClosure) {
2788
assert(!incremental_collection_failed(), "Shouldn't be set yet");
2789
assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2790
"Should be NULL");
2791
if (registerClosure) {
2792
cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2793
}
2794
cmsSpace()->gc_prologue();
2795
// Clear stat counters
2796
NOT_PRODUCT(
2797
assert(_numObjectsPromoted == 0, "check");
2798
assert(_numWordsPromoted == 0, "check");
2799
if (Verbose && PrintGC) {
2800
gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2801
SIZE_FORMAT " bytes concurrently",
2802
_numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2803
}
2804
_numObjectsAllocated = 0;
2805
_numWordsAllocated = 0;
2806
)
2807
}
2808
2809
void CMSCollector::gc_epilogue(bool full) {
2810
// The following locking discipline assumes that we are only called
2811
// when the world is stopped.
2812
assert(SafepointSynchronize::is_at_safepoint(),
2813
"world is stopped assumption");
2814
2815
// Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2816
// if linear allocation blocks need to be appropriately marked to allow the
2817
// blocks to be parsable. We also check here whether we need to nudge the
2818
// CMS collector thread to start a new cycle (if it's not already active).
2819
assert( Thread::current()->is_VM_thread()
2820
|| ( CMSScavengeBeforeRemark
2821
&& Thread::current()->is_ConcurrentGC_thread()),
2822
"Incorrect thread type for epilogue execution");
2823
2824
if (!_between_prologue_and_epilogue) {
2825
// We have already been invoked; this is a gc_epilogue delegation
2826
// from yet another CMS generation that we are responsible for, just
2827
// ignore it since all relevant work has already been done.
2828
return;
2829
}
2830
assert(haveFreelistLocks(), "must have freelist locks");
2831
assert_lock_strong(bitMapLock());
2832
2833
_ct->klass_rem_set()->set_accumulate_modified_oops(false);
2834
2835
_cmsGen->gc_epilogue_work(full);
2836
2837
if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2838
// in case sampling was not already enabled, enable it
2839
_start_sampling = true;
2840
}
2841
// reset _eden_chunk_array so sampling starts afresh
2842
_eden_chunk_index = 0;
2843
2844
size_t cms_used = _cmsGen->cmsSpace()->used();
2845
_cmsGen->cmsSpace()->recalculate_used_stable();
2846
2847
// update performance counters - this uses a special version of
2848
// update_counters() that allows the utilization to be passed as a
2849
// parameter, avoiding multiple calls to used().
2850
//
2851
_cmsGen->update_counters(cms_used);
2852
2853
if (CMSIncrementalMode) {
2854
icms_update_allocation_limits();
2855
}
2856
2857
bitMapLock()->unlock();
2858
releaseFreelistLocks();
2859
2860
if (!CleanChunkPoolAsync) {
2861
Chunk::clean_chunk_pool();
2862
}
2863
2864
set_did_compact(false);
2865
_between_prologue_and_epilogue = false; // ready for next cycle
2866
}
2867
2868
void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2869
collector()->gc_epilogue(full);
2870
2871
// Also reset promotion tracking in par gc thread states.
2872
if (CollectedHeap::use_parallel_gc_threads()) {
2873
for (uint i = 0; i < ParallelGCThreads; i++) {
2874
_par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2875
}
2876
}
2877
}
2878
2879
void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2880
assert(!incremental_collection_failed(), "Should have been cleared");
2881
cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2882
cmsSpace()->gc_epilogue();
2883
// Print stat counters
2884
NOT_PRODUCT(
2885
assert(_numObjectsAllocated == 0, "check");
2886
assert(_numWordsAllocated == 0, "check");
2887
if (Verbose && PrintGC) {
2888
gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2889
SIZE_FORMAT " bytes",
2890
_numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2891
}
2892
_numObjectsPromoted = 0;
2893
_numWordsPromoted = 0;
2894
)
2895
2896
if (PrintGC && Verbose) {
2897
// The call down the chain via contiguous_available() needs the freelistLock,
2898
// so print this out before releasing the freeListLock.
2899
gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2900
contiguous_available());
2901
}
2902
}
2903
2904
#ifndef PRODUCT
2905
bool CMSCollector::have_cms_token() {
2906
Thread* thr = Thread::current();
2907
if (thr->is_VM_thread()) {
2908
return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2909
} else if (thr->is_ConcurrentGC_thread()) {
2910
return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2911
} else if (thr->is_GC_task_thread()) {
2912
return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2913
ParGCRareEvent_lock->owned_by_self();
2914
}
2915
return false;
2916
}
2917
#endif
2918
2919
// Check reachability of the given heap address in CMS generation,
2920
// treating all other generations as roots.
2921
bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2922
// We could "guarantee" below, rather than assert, but i'll
2923
// leave these as "asserts" so that an adventurous debugger
2924
// could try this in the product build provided some subset of
2925
// the conditions were met, provided they were interested in the
2926
// results and knew that the computation below wouldn't interfere
2927
// with other concurrent computations mutating the structures
2928
// being read or written.
2929
assert(SafepointSynchronize::is_at_safepoint(),
2930
"Else mutations in object graph will make answer suspect");
2931
assert(have_cms_token(), "Should hold cms token");
2932
assert(haveFreelistLocks(), "must hold free list locks");
2933
assert_lock_strong(bitMapLock());
2934
2935
// Clear the marking bit map array before starting, but, just
2936
// for kicks, first report if the given address is already marked
2937
gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2938
_markBitMap.isMarked(addr) ? "" : " not");
2939
2940
if (verify_after_remark()) {
2941
MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2942
bool result = verification_mark_bm()->isMarked(addr);
2943
gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2944
result ? "IS" : "is NOT");
2945
return result;
2946
} else {
2947
gclog_or_tty->print_cr("Could not compute result");
2948
return false;
2949
}
2950
}
2951
2952
2953
void
2954
CMSCollector::print_on_error(outputStream* st) {
2955
CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2956
if (collector != NULL) {
2957
CMSBitMap* bitmap = &collector->_markBitMap;
2958
st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2959
bitmap->print_on_error(st, " Bits: ");
2960
2961
st->cr();
2962
2963
CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2964
st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2965
mut_bitmap->print_on_error(st, " Bits: ");
2966
}
2967
}
2968
2969
////////////////////////////////////////////////////////
2970
// CMS Verification Support
2971
////////////////////////////////////////////////////////
2972
// Following the remark phase, the following invariant
2973
// should hold -- each object in the CMS heap which is
2974
// marked in the verification_mark_bm() should also be marked in markBitMap().
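//
// A standalone illustrative sketch (not HotSpot code) of the containment check
// that VerifyMarkedClosure below performs: every bit set in the verification
// bitmap must also be set in the CMS mark bitmap. The std::vector<bool>
// representation and the function name are assumptions made only for this
// sketch.
#include <cstddef>
#include <vector>

static bool verification_marks_subset_of_cms_marks(
    const std::vector<bool>& verification_bits,  // one bit per heap word
    const std::vector<bool>& cms_bits) {         // same length as above
  for (size_t i = 0; i < verification_bits.size(); i++) {
    if (verification_bits[i] && !cms_bits[i]) {
      // An object the verifier found reachable was not marked by CMS.
      return false;
    }
  }
  return true;
}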
2975
2976
class VerifyMarkedClosure: public BitMapClosure {
2977
CMSBitMap* _marks;
2978
bool _failed;
2979
2980
public:
2981
VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2982
2983
bool do_bit(size_t offset) {
2984
HeapWord* addr = _marks->offsetToHeapWord(offset);
2985
if (!_marks->isMarked(addr)) {
2986
oop(addr)->print_on(gclog_or_tty);
2987
gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
2988
_failed = true;
2989
}
2990
return true;
2991
}
2992
2993
bool failed() { return _failed; }
2994
};
2995
2996
bool CMSCollector::verify_after_remark(bool silent) {
2997
if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2998
MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2999
static bool init = false;
3000
3001
assert(SafepointSynchronize::is_at_safepoint(),
3002
"Else mutations in object graph will make answer suspect");
3003
assert(have_cms_token(),
3004
"Else there may be mutual interference in use of "
3005
" verification data structures");
3006
assert(_collectorState > Marking && _collectorState <= Sweeping,
3007
"Else marking info checked here may be obsolete");
3008
assert(haveFreelistLocks(), "must hold free list locks");
3009
assert_lock_strong(bitMapLock());
3010
3011
3012
// Allocate marking bit map if not already allocated
3013
if (!init) { // first time
3014
if (!verification_mark_bm()->allocate(_span)) {
3015
return false;
3016
}
3017
init = true;
3018
}
3019
3020
assert(verification_mark_stack()->isEmpty(), "Should be empty");
3021
3022
// Turn off refs discovery -- so we will be tracing through refs.
3023
// This is as intended, because by this time
3024
// GC must already have cleared any refs that need to be cleared,
3025
// and traced those that need to be marked; moreover,
3026
// the marking done here is not going to interfere in any
3027
// way with the marking information used by GC.
3028
NoRefDiscovery no_discovery(ref_processor());
3029
3030
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3031
3032
// Clear any marks from a previous round
3033
verification_mark_bm()->clear_all();
3034
assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
3035
verify_work_stacks_empty();
3036
3037
GenCollectedHeap* gch = GenCollectedHeap::heap();
3038
gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3039
// Update the saved marks which may affect the root scans.
3040
gch->save_marks();
3041
3042
if (CMSRemarkVerifyVariant == 1) {
3043
// In this first variant of verification, we complete
3044
// all marking, then check if the new marks-vector is
3045
// a subset of the CMS marks-vector.
3046
verify_after_remark_work_1();
3047
} else if (CMSRemarkVerifyVariant == 2) {
3048
// In this second variant of verification, we flag an error
3049
// (i.e. an object reachable in the new marks-vector not reachable
3050
// in the CMS marks-vector) immediately, also indicating the
3051
// identity of an object (A) that references the unmarked object (B) --
3052
// presumably, a mutation to A failed to be picked up by preclean/remark?
3053
verify_after_remark_work_2();
3054
} else {
3055
warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3056
CMSRemarkVerifyVariant);
3057
}
3058
if (!silent) gclog_or_tty->print(" done] ");
3059
return true;
3060
}
3061
3062
void CMSCollector::verify_after_remark_work_1() {
3063
ResourceMark rm;
3064
HandleMark hm;
3065
GenCollectedHeap* gch = GenCollectedHeap::heap();
3066
3067
// Get a clear set of claim bits for the roots processing to work with.
3068
ClassLoaderDataGraph::clear_claimed_marks();
3069
3070
// Mark from roots one level into CMS
3071
MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3072
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3073
3074
gch->gen_process_roots(_cmsGen->level(),
3075
true, // younger gens are roots
3076
true, // activate StrongRootsScope
3077
GenCollectedHeap::ScanningOption(roots_scanning_options()),
3078
should_unload_classes(),
3079
&notOlder,
3080
NULL,
3081
NULL); // SSS: Provide correct closure
3082
3083
// Now mark from the roots
3084
MarkFromRootsClosure markFromRootsClosure(this, _span,
3085
verification_mark_bm(), verification_mark_stack(),
3086
false /* don't yield */, true /* verifying */);
3087
assert(_restart_addr == NULL, "Expected pre-condition");
3088
verification_mark_bm()->iterate(&markFromRootsClosure);
3089
while (_restart_addr != NULL) {
3090
// Deal with stack overflow: by restarting at the indicated
3091
// address.
3092
HeapWord* ra = _restart_addr;
3093
markFromRootsClosure.reset(ra);
3094
_restart_addr = NULL;
3095
verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3096
}
3097
assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3098
verify_work_stacks_empty();
3099
3100
// Marking completed -- now verify that each bit marked in
3101
// verification_mark_bm() is also marked in markBitMap(); flag all
3102
// errors by printing corresponding objects.
3103
VerifyMarkedClosure vcl(markBitMap());
3104
verification_mark_bm()->iterate(&vcl);
3105
if (vcl.failed()) {
3106
gclog_or_tty->print("Verification failed");
3107
Universe::heap()->print_on(gclog_or_tty);
3108
fatal("CMS: failed marking verification after remark");
3109
}
3110
}
3111
3112
class VerifyKlassOopsKlassClosure : public KlassClosure {
3113
class VerifyKlassOopsClosure : public OopClosure {
3114
CMSBitMap* _bitmap;
3115
public:
3116
VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3117
void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3118
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3119
} _oop_closure;
3120
public:
3121
VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3122
void do_klass(Klass* k) {
3123
k->oops_do(&_oop_closure);
3124
}
3125
};
3126
3127
void CMSCollector::verify_after_remark_work_2() {
3128
ResourceMark rm;
3129
HandleMark hm;
3130
GenCollectedHeap* gch = GenCollectedHeap::heap();
3131
3132
// Get a clear set of claim bits for the roots processing to work with.
3133
ClassLoaderDataGraph::clear_claimed_marks();
3134
3135
// Mark from roots one level into CMS
3136
MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3137
markBitMap());
3138
CLDToOopClosure cld_closure(&notOlder, true);
3139
3140
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3141
3142
gch->gen_process_roots(_cmsGen->level(),
3143
true, // younger gens are roots
3144
true, // activate StrongRootsScope
3145
GenCollectedHeap::ScanningOption(roots_scanning_options()),
3146
should_unload_classes(),
3147
&notOlder,
3148
NULL,
3149
&cld_closure);
3150
3151
// Now mark from the roots
3152
MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3153
verification_mark_bm(), markBitMap(), verification_mark_stack());
3154
assert(_restart_addr == NULL, "Expected pre-condition");
3155
verification_mark_bm()->iterate(&markFromRootsClosure);
3156
while (_restart_addr != NULL) {
3157
// Deal with stack overflow: by restarting at the indicated
3158
// address.
3159
HeapWord* ra = _restart_addr;
3160
markFromRootsClosure.reset(ra);
3161
_restart_addr = NULL;
3162
verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3163
}
3164
assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3165
verify_work_stacks_empty();
3166
3167
VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3168
ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3169
3170
// Marking completed -- now verify that each bit marked in
3171
// verification_mark_bm() is also marked in markBitMap(); flag all
3172
// errors by printing corresponding objects.
3173
VerifyMarkedClosure vcl(markBitMap());
3174
verification_mark_bm()->iterate(&vcl);
3175
assert(!vcl.failed(), "Else verification above should not have succeeded");
3176
}
3177
3178
void ConcurrentMarkSweepGeneration::save_marks() {
3179
// delegate to CMS space
3180
cmsSpace()->save_marks();
3181
for (uint i = 0; i < ParallelGCThreads; i++) {
3182
_par_gc_thread_states[i]->promo.startTrackingPromotions();
3183
}
3184
}
3185
3186
bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3187
return cmsSpace()->no_allocs_since_save_marks();
3188
}
3189
3190
#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
3191
\
3192
void ConcurrentMarkSweepGeneration:: \
3193
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
3194
cl->set_generation(this); \
3195
cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
3196
cl->reset_generation(); \
3197
save_marks(); \
3198
}
3199
3200
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3201
3202
void
3203
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3204
cl->set_generation(this);
3205
younger_refs_in_space_iterate(_cmsSpace, cl);
3206
cl->reset_generation();
3207
}
3208
3209
void
3210
ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3211
if (freelistLock()->owned_by_self()) {
3212
Generation::oop_iterate(cl);
3213
} else {
3214
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3215
Generation::oop_iterate(cl);
3216
}
3217
}
3218
3219
void
3220
ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3221
if (freelistLock()->owned_by_self()) {
3222
Generation::object_iterate(cl);
3223
} else {
3224
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3225
Generation::object_iterate(cl);
3226
}
3227
}
3228
3229
void
3230
ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3231
if (freelistLock()->owned_by_self()) {
3232
Generation::safe_object_iterate(cl);
3233
} else {
3234
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3235
Generation::safe_object_iterate(cl);
3236
}
3237
}
3238
3239
void
3240
ConcurrentMarkSweepGeneration::post_compact() {
3241
}
3242
3243
void
3244
ConcurrentMarkSweepGeneration::prepare_for_verify() {
3245
// Fix the linear allocation blocks to look like free blocks.
3246
3247
// Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3248
// are not called when the heap is verified during universe initialization and
3249
// at vm shutdown.
3250
if (freelistLock()->owned_by_self()) {
3251
cmsSpace()->prepare_for_verify();
3252
} else {
3253
MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3254
cmsSpace()->prepare_for_verify();
3255
}
3256
}
3257
3258
void
3259
ConcurrentMarkSweepGeneration::verify() {
3260
// Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3261
// are not called when the heap is verified during universe initialization and
3262
// at vm shutdown.
3263
if (freelistLock()->owned_by_self()) {
3264
cmsSpace()->verify();
3265
} else {
3266
MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3267
cmsSpace()->verify();
3268
}
3269
}
3270
3271
void CMSCollector::verify() {
3272
_cmsGen->verify();
3273
}
3274
3275
#ifndef PRODUCT
3276
bool CMSCollector::overflow_list_is_empty() const {
3277
assert(_num_par_pushes >= 0, "Inconsistency");
3278
if (_overflow_list == NULL) {
3279
assert(_num_par_pushes == 0, "Inconsistency");
3280
}
3281
return _overflow_list == NULL;
3282
}
3283
3284
// The methods verify_work_stacks_empty() and verify_overflow_empty()
3285
// merely consolidate assertion checks that appear to occur together frequently.
3286
void CMSCollector::verify_work_stacks_empty() const {
3287
assert(_markStack.isEmpty(), "Marking stack should be empty");
3288
assert(overflow_list_is_empty(), "Overflow list should be empty");
3289
}
3290
3291
void CMSCollector::verify_overflow_empty() const {
3292
assert(overflow_list_is_empty(), "Overflow list should be empty");
3293
assert(no_preserved_marks(), "No preserved marks");
3294
}
3295
#endif // PRODUCT
3296
3297
// Decide if we want to enable class unloading as part of the
3298
// ensuing concurrent GC cycle. We will collect and
3299
// unload classes if it's the case that:
3300
// (1) an explicit gc request has been made and the flag
3301
// ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3302
// (2) (a) class unloading is enabled at the command line, and
3303
// (b) old gen is getting really full
3304
// NOTE: Provided there is no change in the state of the heap between
3305
// calls to this method, it should have idempotent results. Moreover,
3306
// its results should be monotonically increasing (i.e. going from 0 to 1,
3307
// but not 1 to 0) between successive calls between which the heap was
3308
// not collected. For the implementation below, it must thus rely on
3309
// the property that concurrent_cycles_since_last_unload()
3310
// will not decrease unless a collection cycle happened and that
3311
// _cmsGen->is_too_full() is
3312
// itself also monotonic in that sense. See check_monotonicity()
3313
// below.
3314
void CMSCollector::update_should_unload_classes() {
3315
_should_unload_classes = false;
3316
// Condition 1 above
3317
if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3318
_should_unload_classes = true;
3319
} else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3320
// Disjuncts 2.b.(i,ii,iii) above
3321
_should_unload_classes = (concurrent_cycles_since_last_unload() >=
3322
CMSClassUnloadingMaxInterval)
3323
|| _cmsGen->is_too_full();
3324
}
3325
}
3326
3327
bool ConcurrentMarkSweepGeneration::is_too_full() const {
3328
bool res = should_concurrent_collect();
3329
res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3330
return res;
3331
}
3332
3333
void CMSCollector::setup_cms_unloading_and_verification_state() {
3334
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3335
|| VerifyBeforeExit;
3336
const int rso = GenCollectedHeap::SO_AllCodeCache;
3337
3338
// We set the proper root for this CMS cycle here.
3339
if (should_unload_classes()) { // Should unload classes this cycle
3340
remove_root_scanning_option(rso); // Shrink the root set appropriately
3341
set_verifying(should_verify); // Set verification state for this cycle
3342
return; // Nothing else needs to be done at this time
3343
}
3344
3345
// Not unloading classes this cycle
3346
assert(!should_unload_classes(), "Inconsistency!");
3347
3348
// If we are not unloading classes then add SO_AllCodeCache to root
3349
// scanning options.
3350
add_root_scanning_option(rso);
3351
3352
if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3353
set_verifying(true);
3354
} else if (verifying() && !should_verify) {
3355
// We were verifying, but some verification flags got disabled.
3356
set_verifying(false);
3357
// Exclude symbols, strings and code cache elements from root scanning to
3358
// reduce IM and RM pauses.
3359
remove_root_scanning_option(rso);
3360
}
3361
}
3362
3363
3364
#ifndef PRODUCT
3365
HeapWord* CMSCollector::block_start(const void* p) const {
3366
const HeapWord* addr = (HeapWord*)p;
3367
if (_span.contains(p)) {
3368
if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3369
return _cmsGen->cmsSpace()->block_start(p);
3370
}
3371
}
3372
return NULL;
3373
}
3374
#endif
3375
3376
HeapWord*
3377
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3378
bool tlab,
3379
bool parallel) {
3380
CMSSynchronousYieldRequest yr;
3381
assert(!tlab, "Can't deal with TLAB allocation");
3382
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3383
expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3384
CMSExpansionCause::_satisfy_allocation);
3385
if (GCExpandToAllocateDelayMillis > 0) {
3386
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3387
}
3388
return have_lock_and_allocate(word_size, tlab);
3389
}
3390
3391
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
3392
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
3393
// to CardGeneration and share it...
3394
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3395
return CardGeneration::expand(bytes, expand_bytes);
3396
}
3397
3398
void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3399
CMSExpansionCause::Cause cause)
3400
{
3401
3402
bool success = expand(bytes, expand_bytes);
3403
3404
// remember why we expanded; this information is used
3405
// by shouldConcurrentCollect() when making decisions on whether to start
3406
// a new CMS cycle.
3407
if (success) {
3408
set_expansion_cause(cause);
3409
if (PrintGCDetails && Verbose) {
3410
gclog_or_tty->print_cr("Expanded CMS gen for %s",
3411
CMSExpansionCause::to_string(cause));
3412
}
3413
}
3414
}
3415
3416
HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3417
HeapWord* res = NULL;
3418
MutexLocker x(ParGCRareEvent_lock);
3419
while (true) {
3420
// Expansion by some other thread might make alloc OK now:
3421
res = ps->lab.alloc(word_sz);
3422
if (res != NULL) return res;
3423
// If there's not enough expansion space available, give up.
3424
if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3425
return NULL;
3426
}
3427
// Otherwise, we try expansion.
3428
expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3429
CMSExpansionCause::_allocate_par_lab);
3430
// Now go around the loop and try alloc again;
3431
// A competing par_promote might beat us to the expansion space,
3432
// so we may go around the loop again if promotion fails again.
3433
if (GCExpandToAllocateDelayMillis > 0) {
3434
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3435
}
3436
}
3437
}
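//
// A standalone illustrative sketch (not HotSpot code) of the allocate-or-expand
// retry pattern used in expand_and_par_lab_allocate() above and in
// expand_and_ensure_spooling_space() below: under a lock that serializes the
// rare slow path, retry the allocation, expanding the backing space whenever
// an attempt fails, and give up once no further expansion is possible. The
// ExpandableArena type and its fields are assumptions made only for this sketch.
#include <cstddef>
#include <mutex>

struct ExpandableArena {
  char*      base;          // start of the reserved region
  size_t     reserved;      // hard upper bound on growth, in bytes
  size_t     committed;     // bytes usable right now
  size_t     used;          // bytes handed out so far
  std::mutex rare_event_lock;

  ExpandableArena(char* b, size_t r)
    : base(b), reserved(r), committed(0), used(0) {}

  void* try_alloc(size_t bytes) {
    if (used + bytes > committed) return NULL;  // current space exhausted
    void* p = base + used;
    used += bytes;
    return p;
  }
  size_t uncommitted() const { return reserved - committed; }
  void expand(size_t bytes)  { committed += bytes; }  // pretend commit succeeds
};

static void* alloc_or_expand(ExpandableArena* arena, size_t bytes) {
  std::lock_guard<std::mutex> guard(arena->rare_event_lock);
  while (true) {
    // Expansion by some other thread might have made the allocation possible.
    void* p = arena->try_alloc(bytes);
    if (p != NULL) return p;
    // If there's not enough expansion space left, give up.
    if (arena->uncommitted() < bytes) return NULL;
    // Otherwise expand and retry; a competing allocator may still beat us to
    // the newly committed space, in which case we go around the loop again.
    arena->expand(bytes);
  }
}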
3438
3439
3440
bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3441
PromotionInfo* promo) {
3442
MutexLocker x(ParGCRareEvent_lock);
3443
size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3444
while (true) {
3445
// Expansion by some other thread might make alloc OK now:
3446
if (promo->ensure_spooling_space()) {
3447
assert(promo->has_spooling_space(),
3448
"Post-condition of successful ensure_spooling_space()");
3449
return true;
3450
}
3451
// If there's not enough expansion space available, give up.
3452
if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3453
return false;
3454
}
3455
// Otherwise, we try expansion.
3456
expand(refill_size_bytes, MinHeapDeltaBytes,
3457
CMSExpansionCause::_allocate_par_spooling_space);
3458
// Now go around the loop and try alloc again;
3459
// A competing allocation might beat us to the expansion space,
3460
// so we may go around the loop again if allocation fails again.
3461
if (GCExpandToAllocateDelayMillis > 0) {
3462
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3463
}
3464
}
3465
}
3466
3467
3468
void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3469
assert_locked_or_safepoint(ExpandHeap_lock);
3470
// Shrink committed space
3471
_virtual_space.shrink_by(bytes);
3472
// Shrink space; this also shrinks the space's BOT
3473
_cmsSpace->set_end((HeapWord*) _virtual_space.high());
3474
size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3475
// Shrink the shared block offset array
3476
_bts->resize(new_word_size);
3477
MemRegion mr(_cmsSpace->bottom(), new_word_size);
3478
// Shrink the card table
3479
Universe::heap()->barrier_set()->resize_covered_region(mr);
3480
3481
if (Verbose && PrintGC) {
3482
size_t new_mem_size = _virtual_space.committed_size();
3483
size_t old_mem_size = new_mem_size + bytes;
3484
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3485
name(), old_mem_size/K, new_mem_size/K);
3486
}
3487
}
3488
3489
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3490
assert_locked_or_safepoint(Heap_lock);
3491
size_t size = ReservedSpace::page_align_size_down(bytes);
3492
// Only shrink if a compaction was done so that all the free space
3493
// in the generation is in a contiguous block at the end.
3494
if (size > 0 && did_compact()) {
3495
shrink_by(size);
3496
}
3497
}
3498
3499
bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3500
assert_locked_or_safepoint(Heap_lock);
3501
bool result = _virtual_space.expand_by(bytes);
3502
if (result) {
3503
size_t new_word_size =
3504
heap_word_size(_virtual_space.committed_size());
3505
MemRegion mr(_cmsSpace->bottom(), new_word_size);
3506
_bts->resize(new_word_size); // resize the block offset shared array
3507
Universe::heap()->barrier_set()->resize_covered_region(mr);
3508
// Hmmmm... why doesn't CFLS::set_end verify locking?
3509
// This is quite ugly; FIX ME XXX
3510
_cmsSpace->assert_locked(freelistLock());
3511
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
3512
3513
// update the space and generation capacity counters
3514
if (UsePerfData) {
3515
_space_counters->update_capacity();
3516
_gen_counters->update_all();
3517
}
3518
3519
if (Verbose && PrintGC) {
3520
size_t new_mem_size = _virtual_space.committed_size();
3521
size_t old_mem_size = new_mem_size - bytes;
3522
gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3523
name(), old_mem_size/K, bytes/K, new_mem_size/K);
3524
}
3525
}
3526
return result;
3527
}
3528
3529
bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3530
assert_locked_or_safepoint(Heap_lock);
3531
bool success = true;
3532
const size_t remaining_bytes = _virtual_space.uncommitted_size();
3533
if (remaining_bytes > 0) {
3534
success = grow_by(remaining_bytes);
3535
DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3536
}
3537
return success;
3538
}
3539
3540
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3541
assert_locked_or_safepoint(Heap_lock);
3542
assert_lock_strong(freelistLock());
3543
if (PrintGCDetails && Verbose) {
3544
warning("Shrinking of CMS not yet implemented");
3545
}
3546
return;
3547
}
3548
3549
3550
// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3551
// phases.
3552
class CMSPhaseAccounting: public StackObj {
3553
public:
3554
CMSPhaseAccounting(CMSCollector *collector,
3555
const char *phase,
3556
const GCId gc_id,
3557
bool print_cr = true);
3558
~CMSPhaseAccounting();
3559
3560
private:
3561
CMSCollector *_collector;
3562
const char *_phase;
3563
elapsedTimer _wallclock;
3564
bool _print_cr;
3565
const GCId _gc_id;
3566
3567
public:
3568
// Not MT-safe; so do not pass around these StackObj's
3569
// where they may be accessed by other threads.
3570
jlong wallclock_millis() {
3571
assert(_wallclock.is_active(), "Wall clock should not stop");
3572
_wallclock.stop(); // to record time
3573
jlong ret = _wallclock.milliseconds();
3574
_wallclock.start(); // restart
3575
return ret;
3576
}
3577
};
3578
3579
CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3580
const char *phase,
3581
const GCId gc_id,
3582
bool print_cr) :
3583
_collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
3584
3585
if (PrintCMSStatistics != 0) {
3586
_collector->resetYields();
3587
}
3588
if (PrintGCDetails) {
3589
gclog_or_tty->gclog_stamp(_gc_id);
3590
gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3591
_collector->cmsGen()->short_name(), _phase);
3592
}
3593
_collector->resetTimer();
3594
_wallclock.start();
3595
_collector->startTimer();
3596
}
3597
3598
CMSPhaseAccounting::~CMSPhaseAccounting() {
3599
assert(_wallclock.is_active(), "Wall clock should not have stopped");
3600
_collector->stopTimer();
3601
_wallclock.stop();
3602
if (PrintGCDetails) {
3603
gclog_or_tty->gclog_stamp(_gc_id);
3604
gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3605
_collector->cmsGen()->short_name(),
3606
_phase, _collector->timerValue(), _wallclock.seconds());
3607
if (_print_cr) {
3608
gclog_or_tty->cr();
3609
}
3610
if (PrintCMSStatistics != 0) {
3611
gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3612
_collector->yields());
3613
}
3614
}
3615
}
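//
// A standalone illustrative sketch (not HotSpot code) of the same RAII idea as
// CMSPhaseAccounting above: the constructor records the start of a phase and
// the destructor reports its duration, so bracketing a phase is just a matter
// of scoping the object. The class name and the use of <chrono>/printf are
// assumptions of this sketch, not of the collector.
#include <chrono>
#include <cstdio>

class PhaseTimerSketch {
  const char* _phase;
  std::chrono::steady_clock::time_point _start;
 public:
  explicit PhaseTimerSketch(const char* phase)
    : _phase(phase), _start(std::chrono::steady_clock::now()) {
    printf("[concurrent-%s-start]\n", _phase);
  }
  ~PhaseTimerSketch() {
    std::chrono::duration<double> secs =
      std::chrono::steady_clock::now() - _start;
    printf("[concurrent-%s: %.3f secs]\n", _phase, secs.count());
  }
};
// Usage, mirroring how CMSPhaseAccounting is scoped around a concurrent phase:
//   { PhaseTimerSketch pa("mark"); /* ... perform the phase ... */ }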
3616
3617
// CMS work
3618
3619
// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3620
class CMSParMarkTask : public AbstractGangTask {
3621
protected:
3622
CMSCollector* _collector;
3623
int _n_workers;
3624
CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3625
AbstractGangTask(name),
3626
_collector(collector),
3627
_n_workers(n_workers) {}
3628
// Work method in support of parallel rescan ... of young gen spaces
3629
void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3630
ContiguousSpace* space,
3631
HeapWord** chunk_array, size_t chunk_top);
3632
void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3633
};
3634
3635
// Parallel initial mark task
3636
class CMSParInitialMarkTask: public CMSParMarkTask {
3637
public:
3638
CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3639
CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3640
collector, n_workers) {}
3641
void work(uint worker_id);
3642
};
3643
3644
// Checkpoint the roots into this generation from outside
3645
// this generation. [Note this initial checkpoint need only
3646
// be approximate -- we'll do a catch up phase subsequently.]
3647
void CMSCollector::checkpointRootsInitial(bool asynch) {
3648
assert(_collectorState == InitialMarking, "Wrong collector state");
3649
check_correct_thread_executing();
3650
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3651
3652
save_heap_summary();
3653
report_heap_summary(GCWhen::BeforeGC);
3654
3655
ReferenceProcessor* rp = ref_processor();
3656
SpecializationStats::clear();
3657
assert(_restart_addr == NULL, "Control point invariant");
3658
if (asynch) {
3659
// acquire locks for subsequent manipulations
3660
MutexLockerEx x(bitMapLock(),
3661
Mutex::_no_safepoint_check_flag);
3662
checkpointRootsInitialWork(asynch);
3663
// enable ("weak") refs discovery
3664
rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3665
_collectorState = Marking;
3666
} else {
3667
// (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3668
// which recognizes if we are a CMS generation, and doesn't try to turn on
3669
// discovery; verify that they aren't meddling.
3670
assert(!rp->discovery_is_atomic(),
3671
"incorrect setting of discovery predicate");
3672
assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3673
"ref discovery for this generation kind");
3674
// already have locks
3675
checkpointRootsInitialWork(asynch);
3676
// now enable ("weak") refs discovery
3677
rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3678
_collectorState = Marking;
3679
}
3680
SpecializationStats::print();
3681
_cmsGen->cmsSpace()->recalculate_used_stable();
3682
}
3683
3684
void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3685
assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3686
assert(_collectorState == InitialMarking, "just checking");
3687
3688
// If there has not been a GC[n-1] since last GC[n] cycle completed,
3689
// precede our marking with a collection of all
3690
// younger generations to keep floating garbage to a minimum.
3691
// XXX: we won't do this for now -- it's an optimization to be done later.
3692
3693
// already have locks
3694
assert_lock_strong(bitMapLock());
3695
assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3696
3697
// Setup the verification and class unloading state for this
3698
// CMS collection cycle.
3699
setup_cms_unloading_and_verification_state();
3700
3701
NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3702
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
3703
if (UseAdaptiveSizePolicy) {
3704
size_policy()->checkpoint_roots_initial_begin();
3705
}
3706
3707
// Reset all the PLAB chunk arrays if necessary.
3708
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3709
reset_survivor_plab_arrays();
3710
}
3711
3712
ResourceMark rm;
3713
HandleMark hm;
3714
3715
MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3716
GenCollectedHeap* gch = GenCollectedHeap::heap();
3717
3718
verify_work_stacks_empty();
3719
verify_overflow_empty();
3720
3721
gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3722
// Update the saved marks which may affect the root scans.
3723
gch->save_marks();
3724
3725
// weak reference processing has not started yet.
3726
ref_processor()->set_enqueuing_is_done(false);
3727
3728
// Need to remember all newly created CLDs,
3729
// so that we can guarantee that the remark finds them.
3730
ClassLoaderDataGraph::remember_new_clds(true);
3731
3732
// Whenever a CLD is found, it will be claimed before proceeding to mark
3733
// the klasses. The claimed marks need to be cleared before marking starts.
3734
ClassLoaderDataGraph::clear_claimed_marks();
3735
3736
if (CMSPrintEdenSurvivorChunks) {
3737
print_eden_and_survivor_chunk_arrays();
3738
}
3739
3740
{
3741
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3742
if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3743
// The parallel version.
3744
FlexibleWorkGang* workers = gch->workers();
3745
assert(workers != NULL, "Need parallel worker threads.");
3746
int n_workers = workers->active_workers();
3747
CMSParInitialMarkTask tsk(this, n_workers);
3748
gch->set_par_threads(n_workers);
3749
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3750
if (n_workers > 1) {
3751
GenCollectedHeap::StrongRootsScope srs(gch);
3752
workers->run_task(&tsk);
3753
} else {
3754
GenCollectedHeap::StrongRootsScope srs(gch);
3755
tsk.work(0);
3756
}
3757
gch->set_par_threads(0);
3758
} else {
3759
// The serial version.
3760
CLDToOopClosure cld_closure(&notOlder, true);
3761
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3762
gch->gen_process_roots(_cmsGen->level(),
3763
true, // younger gens are roots
3764
true, // activate StrongRootsScope
3765
GenCollectedHeap::ScanningOption(roots_scanning_options()),
3766
should_unload_classes(),
3767
&notOlder,
3768
NULL,
3769
&cld_closure);
3770
}
3771
}
3772
3773
// Clear mod-union table; it will be dirtied in the prologue of
3774
// CMS generation per each younger generation collection.
3775
3776
assert(_modUnionTable.isAllClear(),
3777
"Was cleared in most recent final checkpoint phase"
3778
" or no bits are set in the gc_prologue before the start of the next "
3779
"subsequent marking phase.");
3780
3781
assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3782
3783
// Save the end of the used_region of the constituent generations
3784
// to be used to limit the extent of sweep in each generation.
3785
save_sweep_limits();
3786
if (UseAdaptiveSizePolicy) {
3787
size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3788
}
3789
verify_overflow_empty();
3790
}
3791
3792
bool CMSCollector::markFromRoots(bool asynch) {
3793
// we might be tempted to assert that:
3794
// assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3795
// "inconsistent argument?");
3796
// However that wouldn't be right, because it's possible that
3797
// a safepoint is indeed in progress as a younger generation
3798
// stop-the-world GC happens even as we mark in this generation.
3799
assert(_collectorState == Marking, "inconsistent state?");
3800
check_correct_thread_executing();
3801
verify_overflow_empty();
3802
3803
bool res;
3804
if (asynch) {
3805
3806
// Start the timers for adaptive size policy for the concurrent phases
3807
// Do it here so that the foreground MS can use the concurrent
3808
// timer since a foreground MS might have the sweep done concurrently
3809
// or STW.
3810
if (UseAdaptiveSizePolicy) {
3811
size_policy()->concurrent_marking_begin();
3812
}
3813
3814
// Weak ref discovery note: We may be discovering weak
3815
// refs in this generation concurrent (but interleaved) with
3816
// weak ref discovery by a younger generation collector.
3817
3818
CMSTokenSyncWithLocks ts(true, bitMapLock());
3819
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3820
CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3821
res = markFromRootsWork(asynch);
3822
if (res) {
3823
_collectorState = Precleaning;
3824
} else { // We failed and a foreground collection wants to take over
3825
assert(_foregroundGCIsActive, "internal state inconsistency");
3826
assert(_restart_addr == NULL, "foreground will restart from scratch");
3827
if (PrintGCDetails) {
3828
gclog_or_tty->print_cr("bailing out to foreground collection");
3829
}
3830
}
3831
if (UseAdaptiveSizePolicy) {
3832
size_policy()->concurrent_marking_end();
3833
}
3834
} else {
3835
assert(SafepointSynchronize::is_at_safepoint(),
3836
"inconsistent with asynch == false");
3837
if (UseAdaptiveSizePolicy) {
3838
size_policy()->ms_collection_marking_begin();
3839
}
3840
// already have locks
3841
res = markFromRootsWork(asynch);
3842
_collectorState = FinalMarking;
3843
if (UseAdaptiveSizePolicy) {
3844
GenCollectedHeap* gch = GenCollectedHeap::heap();
3845
size_policy()->ms_collection_marking_end(gch->gc_cause());
3846
}
3847
}
3848
verify_overflow_empty();
3849
return res;
3850
}
3851
3852
bool CMSCollector::markFromRootsWork(bool asynch) {
3853
// iterate over marked bits in bit map, doing a full scan and mark
3854
// from these roots using the following algorithm:
3855
// . if oop is to the right of the current scan pointer,
3856
// mark corresponding bit (we'll process it later)
3857
// . else (oop is to left of current scan pointer)
3858
// push oop on marking stack
3859
// . drain the marking stack
3860
3861
// Note that when we do a marking step we need to hold the
3862
// bit map lock -- recall that direct allocation (by mutators)
3863
// and promotion (by younger generation collectors) is also
3864
// marking the bit map. [the so-called allocate live policy.]
3865
// Because the implementation of bit map marking is not
3866
// robust wrt simultaneous marking of bits in the same word,
3867
// we need to make sure that there is no such interference
3868
// between concurrent such updates.
3869
3870
// already have locks
3871
assert_lock_strong(bitMapLock());
3872
3873
verify_work_stacks_empty();
3874
verify_overflow_empty();
3875
bool result = false;
3876
if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3877
result = do_marking_mt(asynch);
3878
} else {
3879
result = do_marking_st(asynch);
3880
}
3881
return result;
3882
}
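//
// A standalone illustrative sketch (not HotSpot code) of the finger-based
// marking scheme described in the comment at the top of markFromRootsWork():
// objects are represented by indices in address order, the mark bit map by a
// vector of bools, references at or beyond the scan finger are merely marked
// (the bitmap scan will reach them later), and references behind the finger
// are pushed on a stack and drained immediately. The SketchHeap type is an
// assumption made only for this sketch; refs and marked are assumed to have
// the same length, with valid indices throughout.
#include <cstddef>
#include <stack>
#include <vector>

struct SketchHeap {
  // refs[i] holds the indices of the objects referenced by object i.
  std::vector<std::vector<size_t> > refs;
};

static void mark_from_bitmap(const SketchHeap& heap, std::vector<bool>& marked) {
  std::stack<size_t> work;
  for (size_t finger = 0; finger < marked.size(); finger++) {
    if (!marked[finger]) continue;          // no grey object starts here
    work.push(finger);
    while (!work.empty()) {                 // drain the marking stack
      size_t obj = work.top();
      work.pop();
      const std::vector<size_t>& out = heap.refs[obj];
      for (size_t i = 0; i < out.size(); i++) {
        size_t target = out[i];
        if (marked[target]) continue;       // already marked; nothing to do
        marked[target] = true;              // mark the corresponding bit
        if (target < finger) {
          work.push(target);                // behind the finger: trace it now
        }
        // at or beyond the finger: the outer bitmap scan will process it
      }
    }
  }
}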
3883
3884
// Forward decl
3885
class CMSConcMarkingTask;
3886
3887
class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3888
CMSCollector* _collector;
3889
CMSConcMarkingTask* _task;
3890
public:
3891
virtual void yield();
3892
3893
// "n_threads" is the number of threads to be terminated.
3894
// "queue_set" is a set of work queues of other threads.
3895
// "collector" is the CMS collector associated with this task terminator.
3896
// "yield" indicates whether we need the gang as a whole to yield.
3897
CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3898
ParallelTaskTerminator(n_threads, queue_set),
3899
_collector(collector) { }
3900
3901
void set_task(CMSConcMarkingTask* task) {
3902
_task = task;
3903
}
3904
};
3905
3906
class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3907
CMSConcMarkingTask* _task;
3908
public:
3909
bool should_exit_termination();
3910
void set_task(CMSConcMarkingTask* task) {
3911
_task = task;
3912
}
3913
};
3914
3915
// MT Concurrent Marking Task
3916
class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3917
CMSCollector* _collector;
3918
int _n_workers; // requested/desired # workers
3919
bool _asynch;
3920
bool _result;
3921
CompactibleFreeListSpace* _cms_space;
3922
char _pad_front[64]; // padding to ...
3923
HeapWord* _global_finger; // ... avoid sharing cache line
3924
char _pad_back[64];
3925
HeapWord* _restart_addr;
3926
3927
// Exposed here for yielding support
3928
Mutex* const _bit_map_lock;
3929
3930
// The per thread work queues, available here for stealing
3931
OopTaskQueueSet* _task_queues;
3932
3933
// Termination (and yielding) support
3934
CMSConcMarkingTerminator _term;
3935
CMSConcMarkingTerminatorTerminator _term_term;
3936
3937
public:
3938
CMSConcMarkingTask(CMSCollector* collector,
3939
CompactibleFreeListSpace* cms_space,
3940
bool asynch,
3941
YieldingFlexibleWorkGang* workers,
3942
OopTaskQueueSet* task_queues):
3943
YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3944
_collector(collector),
3945
_cms_space(cms_space),
3946
_asynch(asynch), _n_workers(0), _result(true),
3947
_task_queues(task_queues),
3948
_term(_n_workers, task_queues, _collector),
3949
_bit_map_lock(collector->bitMapLock())
3950
{
3951
_requested_size = _n_workers;
3952
_term.set_task(this);
3953
_term_term.set_task(this);
3954
_restart_addr = _global_finger = _cms_space->bottom();
3955
}
3956
3957
3958
OopTaskQueueSet* task_queues() { return _task_queues; }
3959
3960
OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3961
3962
HeapWord** global_finger_addr() { return &_global_finger; }
3963
3964
CMSConcMarkingTerminator* terminator() { return &_term; }
3965
3966
virtual void set_for_termination(int active_workers) {
3967
terminator()->reset_for_reuse(active_workers);
3968
}
3969
3970
void work(uint worker_id);
3971
bool should_yield() {
3972
return ConcurrentMarkSweepThread::should_yield()
3973
&& !_collector->foregroundGCIsActive()
3974
&& _asynch;
3975
}
3976
3977
virtual void coordinator_yield(); // stuff done by coordinator
3978
bool result() { return _result; }
3979
3980
void reset(HeapWord* ra) {
3981
assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3982
_restart_addr = _global_finger = ra;
3983
_term.reset_for_reuse();
3984
}
3985
3986
static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3987
OopTaskQueue* work_q);
3988
3989
private:
3990
void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3991
void do_work_steal(int i);
3992
void bump_global_finger(HeapWord* f);
3993
};
3994
3995
bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3996
assert(_task != NULL, "Error");
3997
return _task->yielding();
3998
// Note that we do not need the disjunct || _task->should_yield() above
3999
// because we want terminating threads to yield only if the task
4000
// is already in the midst of yielding, which happens only after at least one
4001
// thread has yielded.
4002
}
4003
4004
void CMSConcMarkingTerminator::yield() {
4005
if (_task->should_yield()) {
4006
_task->yield();
4007
} else {
4008
ParallelTaskTerminator::yield();
4009
}
4010
}
4011
4012
////////////////////////////////////////////////////////////////
4013
// Concurrent Marking Algorithm Sketch
4014
////////////////////////////////////////////////////////////////
4015
// Until all tasks exhausted (both spaces):
4016
// -- claim next available chunk
4017
// -- bump global finger via CAS
4018
// -- find first object that starts in this chunk
4019
// and start scanning bitmap from that position
4020
// -- scan marked objects for oops
4021
// -- CAS-mark target, and if successful:
4022
// . if target oop is above global finger (volatile read)
4023
// nothing to do
4024
// . if target oop is in chunk and above local finger
4025
// then nothing to do
4026
// . else push on work-queue
4027
// -- Deal with possible overflow issues:
4028
// . local work-queue overflow causes stuff to be pushed on
4029
// global (common) overflow queue
4030
// . always first empty local work queue
4031
// . then get a batch of oops from global work queue if any
4032
// . then do work stealing
4033
// -- When all tasks claimed (both spaces)
4034
// and local work queue empty,
4035
// then in a loop do:
4036
// . check global overflow stack; steal a batch of oops and trace
4037
// . try to steal from other threads if the GOS is empty
4038
// . if neither is available, offer termination
4039
// -- Terminate and return result
4040
//
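//
// A standalone illustrative sketch (not HotSpot code) of the "bump global
// finger via CAS" step in the outline above (compare bump_global_finger()
// further below): the finger only ever moves forward, to the maximum of its
// current value and the end of the chunk just claimed, even when several
// workers race on it. The use of <atomic> is an assumption of this sketch.
#include <atomic>
#include <stdint.h>

static void bump_finger_to(std::atomic<uintptr_t>* global_finger,
                           uintptr_t chunk_end) {
  uintptr_t observed = global_finger->load();
  while (chunk_end > observed) {
    // On failure, 'observed' is refreshed with the competing value and we
    // retry only while our chunk end is still ahead of it.
    if (global_finger->compare_exchange_weak(observed, chunk_end)) {
      break;
    }
  }
}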
4041
void CMSConcMarkingTask::work(uint worker_id) {
4042
elapsedTimer _timer;
4043
ResourceMark rm;
4044
HandleMark hm;
4045
4046
DEBUG_ONLY(_collector->verify_overflow_empty();)
4047
4048
// Before we begin work, our work queue should be empty
4049
assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4050
// Scan the bitmap covering _cms_space, tracing through grey objects.
4051
_timer.start();
4052
do_scan_and_mark(worker_id, _cms_space);
4053
_timer.stop();
4054
if (PrintCMSStatistics != 0) {
4055
gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4056
worker_id, _timer.seconds());
4057
// XXX: need xxx/xxx type of notation, two timers
4058
}
4059
4060
// ... do work stealing
4061
_timer.reset();
4062
_timer.start();
4063
do_work_steal(worker_id);
4064
_timer.stop();
4065
if (PrintCMSStatistics != 0) {
4066
gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4067
worker_id, _timer.seconds());
4068
// XXX: need xxx/xxx type of notation, two timers
4069
}
4070
assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4071
assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4072
// Note that under the current task protocol, the
4073
// following assertion is true even of the spaces
4074
// expanded since the completion of the concurrent
4075
// marking. XXX This will likely change under a strict
4076
// ABORT semantics.
4077
// After perm removal the comparison was changed to
4078
// greater than or equal to from strictly greater than.
4079
// Before perm removal the highest address sweep would
4080
// have been at the end of perm gen but now is at the
4081
// end of the tenured gen.
4082
assert(_global_finger >= _cms_space->end(),
4083
"All tasks have been completed");
4084
DEBUG_ONLY(_collector->verify_overflow_empty();)
4085
}
4086
4087
void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4088
HeapWord* read = _global_finger;
4089
HeapWord* cur = read;
4090
while (f > read) {
4091
cur = read;
4092
read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4093
if (cur == read) {
4094
// our cas succeeded
4095
assert(_global_finger >= f, "protocol consistency");
4096
break;
4097
}
4098
}
4099
}
4100
4101
// This is really inefficient, and should be redone by
4102
// using (not yet available) block-read and -write interfaces to the
4103
// stack and the work_queue. XXX FIX ME !!!
4104
bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4105
OopTaskQueue* work_q) {
4106
// Fast lock-free check
4107
if (ovflw_stk->length() == 0) {
4108
return false;
4109
}
4110
assert(work_q->size() == 0, "Shouldn't steal");
4111
MutexLockerEx ml(ovflw_stk->par_lock(),
4112
Mutex::_no_safepoint_check_flag);
4113
// Grab up to 1/4 the size of the work queue
4114
size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4115
(size_t)ParGCDesiredObjsFromOverflowList);
4116
num = MIN2(num, ovflw_stk->length());
4117
for (int i = (int) num; i > 0; i--) {
4118
oop cur = ovflw_stk->pop();
4119
assert(cur != NULL, "Counted wrong?");
4120
work_q->push(cur);
4121
}
4122
return num > 0;
4123
}
4124
4125
void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4126
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4127
int n_tasks = pst->n_tasks();
4128
// We allow that there may be no tasks to do here because
4129
// we are restarting after a stack overflow.
4130
assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4131
uint nth_task = 0;
4132
4133
HeapWord* aligned_start = sp->bottom();
4134
if (sp->used_region().contains(_restart_addr)) {
4135
// Align down to a card boundary for the start of 0th task
4136
// for this space.
4137
aligned_start =
4138
(HeapWord*)align_size_down((uintptr_t)_restart_addr,
4139
CardTableModRefBS::card_size);
4140
}
4141
4142
size_t chunk_size = sp->marking_task_size();
4143
while (!pst->is_task_claimed(/* reference */ nth_task)) {
4144
// Having claimed the nth task in this space,
4145
// compute the chunk that it corresponds to:
4146
MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4147
aligned_start + (nth_task+1)*chunk_size);
4148
// Try and bump the global finger via a CAS;
4149
// note that we need to do the global finger bump
4150
// _before_ taking the intersection below, because
4151
// the task corresponding to that region will be
4152
// deemed done even if the used_region() expands
4153
// because of allocation -- as it almost certainly will
4154
// during start-up while the threads yield in the
4155
// closure below.
4156
HeapWord* finger = span.end();
4157
bump_global_finger(finger); // atomically
4158
// There are null tasks here corresponding to chunks
4159
// beyond the "top" address of the space.
4160
span = span.intersection(sp->used_region());
4161
if (!span.is_empty()) { // Non-null task
4162
HeapWord* prev_obj;
4163
assert(!span.contains(_restart_addr) || nth_task == 0,
4164
"Inconsistency");
4165
if (nth_task == 0) {
4166
// For the 0th task, we'll not need to compute a block_start.
4167
if (span.contains(_restart_addr)) {
4168
// In the case of a restart because of stack overflow,
4169
// we might additionally skip a chunk prefix.
4170
prev_obj = _restart_addr;
4171
} else {
4172
prev_obj = span.start();
4173
}
4174
} else {
4175
// We want to skip the first object because
4176
// the protocol is to scan any object in its entirety
4177
// that _starts_ in this span; a fortiori, any
4178
// object starting in an earlier span is scanned
4179
// as part of an earlier claimed task.
4180
// Below we use the "careful" version of block_start
4181
// so we do not try to navigate uninitialized objects.
4182
prev_obj = sp->block_start_careful(span.start());
4183
// Below we use a variant of block_size that uses the
4184
// Printezis bits to avoid waiting for allocated
4185
// objects to become initialized/parsable.
4186
while (prev_obj < span.start()) {
4187
size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4188
if (sz > 0) {
4189
prev_obj += sz;
4190
} else {
4191
// In this case we may end up doing a bit of redundant
4192
// scanning, but that appears unavoidable, short of
4193
// locking the free list locks; see bug 6324141.
4194
break;
4195
}
4196
}
4197
}
4198
if (prev_obj < span.end()) {
4199
MemRegion my_span = MemRegion(prev_obj, span.end());
4200
// Do the marking work within a non-empty span --
4201
// the last argument to the constructor indicates whether the
4202
// iteration should be incremental with periodic yields.
4203
Par_MarkFromRootsClosure cl(this, _collector, my_span,
4204
&_collector->_markBitMap,
4205
work_queue(i),
4206
&_collector->_markStack,
4207
_asynch);
4208
_collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4209
} // else nothing to do for this task
4210
} // else nothing to do for this task
4211
}
4212
// We'd be tempted to assert here that since there are no
4213
// more tasks left to claim in this space, the global_finger
4214
// must exceed space->top() and a fortiori space->end(). However,
4215
// that would not quite be correct because the bumping of
4216
// global_finger occurs strictly after the claiming of a task,
4217
// so by the time we reach here the global finger may not yet
4218
// have been bumped up by the thread that claimed the last
4219
// task.
4220
pst->all_tasks_completed();
4221
}
4222
4223
class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
4224
private:
4225
CMSCollector* _collector;
4226
CMSConcMarkingTask* _task;
4227
MemRegion _span;
4228
CMSBitMap* _bit_map;
4229
CMSMarkStack* _overflow_stack;
4230
OopTaskQueue* _work_queue;
4231
protected:
4232
DO_OOP_WORK_DEFN
4233
public:
4234
Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4235
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4236
MetadataAwareOopClosure(collector->ref_processor()),
4237
_collector(collector),
4238
_task(task),
4239
_span(collector->_span),
4240
_work_queue(work_queue),
4241
_bit_map(bit_map),
4242
_overflow_stack(overflow_stack)
4243
{ }
4244
virtual void do_oop(oop* p);
4245
virtual void do_oop(narrowOop* p);
4246
4247
void trim_queue(size_t max);
4248
void handle_stack_overflow(HeapWord* lost);
4249
void do_yield_check() {
4250
if (_task->should_yield()) {
4251
_task->yield();
4252
}
4253
}
4254
};
4255
4256
// Grey object scanning during work stealing phase --
4257
// the salient assumption here is that any references
4258
// that are in these stolen objects being scanned must
4259
// already have been initialized (else they would not have
4260
// been published), so we do not need to check for
4261
// uninitialized objects before pushing here.
4262
void Par_ConcMarkingClosure::do_oop(oop obj) {
4263
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4264
HeapWord* addr = (HeapWord*)obj;
4265
// Check if oop points into the CMS generation
4266
// and is not marked
4267
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4268
// a white object ...
4269
// If we manage to "claim" the object, by being the
4270
// first thread to mark it, then we push it on our
4271
// marking stack
4272
if (_bit_map->par_mark(addr)) { // ... now grey
4273
// push on work queue (grey set)
4274
bool simulate_overflow = false;
4275
NOT_PRODUCT(
4276
if (CMSMarkStackOverflowALot &&
4277
_collector->simulate_overflow()) {
4278
// simulate a stack overflow
4279
simulate_overflow = true;
4280
}
4281
)
4282
if (simulate_overflow ||
4283
!(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4284
// stack overflow
4285
if (PrintCMSStatistics != 0) {
4286
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4287
SIZE_FORMAT, _overflow_stack->capacity());
4288
}
4289
// We cannot assert that the overflow stack is full because
4290
// it may have been emptied since.
4291
assert(simulate_overflow ||
4292
_work_queue->size() == _work_queue->max_elems(),
4293
"Else push should have succeeded");
4294
handle_stack_overflow(addr);
4295
}
4296
} // Else, some other thread got there first
4297
do_yield_check();
4298
}
4299
}
4300
4301
void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4302
void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4303
4304
void Par_ConcMarkingClosure::trim_queue(size_t max) {
4305
while (_work_queue->size() > max) {
4306
oop new_oop;
4307
if (_work_queue->pop_local(new_oop)) {
4308
assert(new_oop->is_oop(), "Should be an oop");
4309
assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4310
assert(_span.contains((HeapWord*)new_oop), "Not in span");
4311
new_oop->oop_iterate(this); // do_oop() above
4312
do_yield_check();
4313
}
4314
}
4315
}
4316
4317
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
4320
void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4321
// We need to do this under a mutex to prevent other
4322
// workers from interfering with the work done below.
4323
MutexLockerEx ml(_overflow_stack->par_lock(),
4324
Mutex::_no_safepoint_check_flag);
4325
// Remember the least grey address discarded
4326
HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4327
_collector->lower_restart_addr(ra);
4328
_overflow_stack->reset(); // discard stack contents
4329
_overflow_stack->expand(); // expand the stack if possible
4330
}
4331
4332
4333
void CMSConcMarkingTask::do_work_steal(int i) {
4334
OopTaskQueue* work_q = work_queue(i);
4335
oop obj_to_scan;
4336
CMSBitMap* bm = &(_collector->_markBitMap);
4337
CMSMarkStack* ovflw = &(_collector->_markStack);
4338
int* seed = _collector->hash_seed(i);
4339
Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4340
while (true) {
4341
cl.trim_queue(0);
4342
assert(work_q->size() == 0, "Should have been emptied above");
4343
if (get_work_from_overflow_stack(ovflw, work_q)) {
4344
// Can't assert below because the work obtained from the
4345
// overflow stack may already have been stolen from us.
4346
// assert(work_q->size() > 0, "Work from overflow stack");
4347
continue;
4348
} else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4349
assert(obj_to_scan->is_oop(), "Should be an oop");
4350
assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4351
obj_to_scan->oop_iterate(&cl);
4352
} else if (terminator()->offer_termination(&_term_term)) {
4353
assert(work_q->size() == 0, "Impossible!");
4354
break;
4355
} else if (yielding() || should_yield()) {
4356
yield();
4357
}
4358
}
4359
}
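
// Illustrative sketch, not part of HotSpot: the order of operations in the
// work-stealing loop above -- drain the local queue, refill from the shared
// overflow stack, try to steal from a peer, and only then offer termination
// (the real loop additionally yields). The function pointers below are
// hypothetical stand-ins for the queue, overflow-stack, stealing and
// terminator operations the real code uses.
static void sketch_work_steal_loop(void (*drain_local_queue)(),
                                   bool (*refill_from_overflow)(),  // true if it moved any work
                                   bool (*steal_and_scan_one)(),    // true if a steal succeeded
                                   bool (*offer_termination)()) {   // true when all workers agree
  while (true) {
    drain_local_queue();                  // always empty local work first
    if (refill_from_overflow()) continue; // got shared work; go drain it
    if (steal_and_scan_one())   continue; // stole from a peer; keep going
    if (offer_termination())    break;    // everyone idle: we are done
  }
}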
4360
4361
// This is run by the CMS (coordinator) thread.
4362
void CMSConcMarkingTask::coordinator_yield() {
4363
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4364
"CMS thread should hold CMS token");
4365
// First give up the locks, then yield, then re-lock
4366
// We should probably use a constructor/destructor idiom to
4367
// do this unlock/lock or modify the MutexUnlocker class to
4368
// serve our purpose. XXX
4369
assert_lock_strong(_bit_map_lock);
4370
_bit_map_lock->unlock();
4371
ConcurrentMarkSweepThread::desynchronize(true);
4372
ConcurrentMarkSweepThread::acknowledge_yield_request();
4373
_collector->stopTimer();
4374
if (PrintCMSStatistics != 0) {
4375
_collector->incrementYields();
4376
}
4377
_collector->icms_wait();
4378
4379
// It is possible for whichever thread initiated the yield request
// not to get a chance to wake up and take the bitmap lock between
// this thread releasing it and reacquiring it. So, while the
// should_yield() flag is on, let's sleep for a bit to give the
// other thread a chance to wake up. The limit imposed on the number
// of iterations is defensive, to avoid any unforeseen circumstances
// putting us into an infinite loop. Since it's always been this
// (coordinator_yield()) method that was observed to cause the
// problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
// which is by default non-zero. For the other seven methods that
// also perform the yield operation, we are using a different
// parameter (CMSYieldSleepCount) which is by default zero. This way we
// can enable the sleeping for those methods too, if necessary.
// See 6442774.
//
// We really need to reconsider the synchronization between the GC
// thread and the yield-requesting threads in the future and we
// should really use wait/notify, which is the recommended
// way of doing this type of interaction. Additionally, we should
// consolidate the eight nearly identical methods that perform this
// yield operation into one, for better maintainability and
// readability. See 6445193.
//
// Tony 2006.06.29
4403
for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4404
ConcurrentMarkSweepThread::should_yield() &&
4405
!CMSCollector::foregroundGCIsActive(); ++i) {
4406
os::sleep(Thread::current(), 1, false);
4407
ConcurrentMarkSweepThread::acknowledge_yield_request();
4408
}
4409
4410
ConcurrentMarkSweepThread::synchronize(true);
4411
_bit_map_lock->lock_without_safepoint_check();
4412
_collector->startTimer();
4413
}
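
// Illustrative sketch, not part of HotSpot: the bounded "sleep while a yield
// is still requested" idiom used in coordinator_yield() above, reduced to
// plain parameters. The predicate arguments stand in for
// ConcurrentMarkSweepThread::should_yield() and
// CMSCollector::foregroundGCIsActive(); sketch_sleep_one_ms() is a
// hypothetical stand-in for os::sleep(..., 1, false).
static void sketch_bounded_yield_sleep(unsigned max_iterations,
                                       bool (*still_requested)(),
                                       bool (*foreground_gc_active)(),
                                       void (*sketch_sleep_one_ms)()) {
  // Sleep at most max_iterations milliseconds; the cap is purely defensive
  // so that an unexpected condition cannot keep us spinning here forever.
  for (unsigned i = 0;
       i < max_iterations && still_requested() && !foreground_gc_active();
       ++i) {
    sketch_sleep_one_ms();
  }
}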
4414
4415
bool CMSCollector::do_marking_mt(bool asynch) {
4416
assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4417
int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4418
conc_workers()->total_workers(),
4419
conc_workers()->active_workers(),
4420
Threads::number_of_non_daemon_threads());
4421
conc_workers()->set_active_workers(num_workers);
4422
4423
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4424
4425
CMSConcMarkingTask tsk(this,
4426
cms_space,
4427
asynch,
4428
conc_workers(),
4429
task_queues());
4430
4431
// Since the actual number of workers we get may be different
// from the number we requested above, do we need to do anything different
// below? In particular, maybe we need to subclass the SequentialSubTasksDone
// class?? XXX
cms_space->initialize_sequential_subtasks_for_marking(num_workers);
4436
4437
// Refs discovery is already non-atomic.
4438
assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4439
assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4440
conc_workers()->start_task(&tsk);
4441
while (tsk.yielded()) {
4442
tsk.coordinator_yield();
4443
conc_workers()->continue_task(&tsk);
4444
}
4445
// If the task was aborted, _restart_addr will be non-NULL
4446
assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4447
while (_restart_addr != NULL) {
4448
// XXX For now we do not make use of ABORTED state and have not
4449
// yet implemented the right abort semantics (even in the original
4450
// single-threaded CMS case). That needs some more investigation
4451
// and is deferred for now; see CR# TBF. 07252005YSR. XXX
4452
assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4453
// If _restart_addr is non-NULL, a marking stack overflow
4454
// occurred; we need to do a fresh marking iteration from the
4455
// indicated restart address.
4456
if (_foregroundGCIsActive && asynch) {
4457
// We may be running into repeated stack overflows, having
4458
// reached the limit of the stack size, while making very
4459
// slow forward progress. It may be best to bail out and
4460
// let the foreground collector do its job.
4461
// Clear _restart_addr, so that foreground GC
4462
// works from scratch. This avoids the headache of
4463
// a "rescan" which would otherwise be needed because
4464
// of the dirty mod union table & card table.
4465
_restart_addr = NULL;
4466
return false;
4467
}
4468
// Adjust the task to restart from _restart_addr
4469
tsk.reset(_restart_addr);
4470
cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4471
_restart_addr);
4472
_restart_addr = NULL;
4473
// Get the workers going again
4474
conc_workers()->start_task(&tsk);
4475
while (tsk.yielded()) {
4476
tsk.coordinator_yield();
4477
conc_workers()->continue_task(&tsk);
4478
}
4479
}
4480
assert(tsk.completed(), "Inconsistency");
4481
assert(tsk.result() == true, "Inconsistency");
4482
return true;
4483
}
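
// Illustrative sketch, not part of HotSpot: the overall shape of the
// "run, and re-run from a restart address after a marking-stack overflow"
// loop in do_marking_mt() above. The function pointers are hypothetical
// stand-ins; restart addresses model _restart_addr, with 0 meaning
// "no overflow occurred".
static bool sketch_mark_with_restarts(void (*run_marking_from)(size_t start),
                                      size_t (*take_restart_addr)(),     // returns 0 if none
                                      bool (*should_bail_to_foreground)()) {
  run_marking_from(0);                       // initial pass over the whole span
  for (size_t ra = take_restart_addr(); ra != 0; ra = take_restart_addr()) {
    if (should_bail_to_foreground()) {
      return false;                          // let the foreground collector redo the work
    }
    run_marking_from(ra);                    // fresh iteration from the restart address
  }
  return true;                               // marking completed with no outstanding overflow
}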
4484
4485
bool CMSCollector::do_marking_st(bool asynch) {
4486
ResourceMark rm;
4487
HandleMark hm;
4488
4489
// Temporarily make refs discovery single threaded (non-MT)
4490
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4491
MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4492
&_markStack, CMSYield && asynch);
4493
// the last argument to iterate indicates whether the iteration
4494
// should be incremental with periodic yields.
4495
_markBitMap.iterate(&markFromRootsClosure);
4496
// If _restart_addr is non-NULL, a marking stack overflow
4497
// occurred; we need to do a fresh iteration from the
4498
// indicated restart address.
4499
while (_restart_addr != NULL) {
4500
if (_foregroundGCIsActive && asynch) {
4501
// We may be running into repeated stack overflows, having
4502
// reached the limit of the stack size, while making very
4503
// slow forward progress. It may be best to bail out and
4504
// let the foreground collector do its job.
4505
// Clear _restart_addr, so that foreground GC
4506
// works from scratch. This avoids the headache of
4507
// a "rescan" which would otherwise be needed because
4508
// of the dirty mod union table & card table.
4509
_restart_addr = NULL;
4510
return false; // indicating failure to complete marking
4511
}
4512
// Deal with stack overflow:
4513
// we restart marking from _restart_addr
4514
HeapWord* ra = _restart_addr;
4515
markFromRootsClosure.reset(ra);
4516
_restart_addr = NULL;
4517
_markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4518
}
4519
return true;
4520
}
4521
4522
void CMSCollector::preclean() {
4523
check_correct_thread_executing();
4524
assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4525
verify_work_stacks_empty();
4526
verify_overflow_empty();
4527
_abort_preclean = false;
4528
if (CMSPrecleaningEnabled) {
4529
if (!CMSEdenChunksRecordAlways) {
4530
_eden_chunk_index = 0;
4531
}
4532
size_t used = get_eden_used();
4533
size_t capacity = get_eden_capacity();
4534
// Don't start sampling unless we will get sufficiently
4535
// many samples.
4536
if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4537
* CMSScheduleRemarkEdenPenetration)) {
4538
_start_sampling = true;
4539
} else {
4540
_start_sampling = false;
4541
}
4542
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4543
CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4544
preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4545
}
4546
CMSTokenSync x(true); // is cms thread
4547
if (CMSPrecleaningEnabled) {
4548
sample_eden();
4549
_collectorState = AbortablePreclean;
4550
} else {
4551
_collectorState = FinalMarking;
4552
}
4553
verify_work_stacks_empty();
4554
verify_overflow_empty();
4555
}
4556
4557
// Try and schedule the remark such that young gen
4558
// occupancy is CMSScheduleRemarkEdenPenetration %.
4559
void CMSCollector::abortable_preclean() {
4560
check_correct_thread_executing();
4561
assert(CMSPrecleaningEnabled, "Inconsistent control state");
4562
assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4563
4564
// If Eden's current occupancy is below this threshold,
// immediately schedule the remark; else preclean
// past the next scavenge in an effort to
// schedule the pause as described above. By choosing
// CMSScheduleRemarkEdenSizeThreshold >= max eden size
// we will never do an actual abortable preclean cycle.
4570
if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4571
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4572
CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4573
// We need more smarts in the abortable preclean
4574
// loop below to deal with cases where allocation
4575
// in young gen is very very slow, and our precleaning
4576
// is running a losing race against a horde of
4577
// mutators intent on flooding us with CMS updates
4578
// (dirty cards).
4579
// One, admittedly dumb, strategy is to give up
4580
// after a certain number of abortable precleaning loops
4581
// or after a certain maximum time. We want to make
4582
// this smarter in the next iteration.
4583
// XXX FIX ME!!! YSR
4584
size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4585
while (!(should_abort_preclean() ||
4586
ConcurrentMarkSweepThread::should_terminate())) {
4587
workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4588
cumworkdone += workdone;
4589
loops++;
4590
// Voluntarily terminate abortable preclean phase if we have
4591
// been at it for too long.
4592
if ((CMSMaxAbortablePrecleanLoops != 0) &&
4593
loops >= CMSMaxAbortablePrecleanLoops) {
4594
if (PrintGCDetails) {
4595
gclog_or_tty->print(" CMS: abort preclean due to loops ");
4596
}
4597
break;
4598
}
4599
if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4600
if (PrintGCDetails) {
4601
gclog_or_tty->print(" CMS: abort preclean due to time ");
4602
}
4603
break;
4604
}
4605
// If we are doing little work each iteration, we should
4606
// take a short break.
4607
if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4608
// Sleep for some time, waiting for work to accumulate
4609
stopTimer();
4610
cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4611
startTimer();
4612
waited++;
4613
}
4614
}
4615
if (PrintCMSStatistics > 0) {
4616
gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4617
loops, waited, cumworkdone);
4618
}
4619
}
4620
CMSTokenSync x(true); // is cms thread
4621
if (_collectorState != Idling) {
4622
assert(_collectorState == AbortablePreclean,
4623
"Spontaneous state transition?");
4624
_collectorState = FinalMarking;
4625
} // Else, a foreground collection completed this CMS cycle.
4626
return;
4627
}
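
// Illustrative sketch, not part of HotSpot: the termination conditions of
// the abortable preclean loop above, with the CMS flags replaced by plain
// parameters (max_loops ~ CMSMaxAbortablePrecleanLoops, max_millis ~
// CMSMaxAbortablePrecleanTime). The caller is assumed to check
// should_abort_preclean() and thread termination separately, as the real
// loop does.
static bool sketch_continue_abortable_preclean(size_t loops_so_far,
                                               double elapsed_millis,
                                               size_t max_loops,      // 0 means "no loop limit"
                                               double max_millis) {
  if (max_loops != 0 && loops_so_far >= max_loops) return false;  // too many iterations
  if (elapsed_millis > max_millis)                 return false;  // out of wallclock budget
  return true;                                                    // keep precleaning
}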
4628
4629
// Respond to an Eden sampling opportunity
4630
void CMSCollector::sample_eden() {
4631
// Make sure a young gc cannot sneak in between our
4632
// reading and recording of a sample.
4633
assert(Thread::current()->is_ConcurrentGC_thread(),
4634
"Only the cms thread may collect Eden samples");
4635
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4636
"Should collect samples while holding CMS token");
4637
if (!_start_sampling) {
4638
return;
4639
}
4640
// When CMSEdenChunksRecordAlways is true, the eden chunk array
4641
// is populated by the young generation.
4642
if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4643
if (_eden_chunk_index < _eden_chunk_capacity) {
4644
_eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4645
assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4646
"Unexpected state of Eden");
4647
// We'd like to check that what we just sampled is an oop-start address;
4648
// however, we cannot do that here since the object may not yet have been
4649
// initialized. So we'll instead do the check when we _use_ this sample
4650
// later.
4651
if (_eden_chunk_index == 0 ||
4652
(pointer_delta(_eden_chunk_array[_eden_chunk_index],
4653
_eden_chunk_array[_eden_chunk_index-1])
4654
>= CMSSamplingGrain)) {
4655
_eden_chunk_index++; // commit sample
4656
}
4657
}
4658
}
4659
if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4660
size_t used = get_eden_used();
4661
size_t capacity = get_eden_capacity();
4662
assert(used <= capacity, "Unexpected state of Eden");
4663
if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4664
_abort_preclean = true;
4665
}
4666
}
4667
}
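
// Illustrative sketch, not part of HotSpot: the eden-occupancy test that
// sets _abort_preclean above, written out as a standalone predicate.
// penetration_percent corresponds to CMSScheduleRemarkEdenPenetration.
static bool sketch_eden_penetration_reached(size_t used,
                                            size_t capacity,
                                            size_t penetration_percent) {
  // Dividing capacity by 100 first (as the code above does) avoids overflow
  // for large heaps, at the cost of a little rounding.
  return used > (capacity / 100) * penetration_percent;
}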
4668
4669
4670
size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4671
assert(_collectorState == Precleaning ||
4672
_collectorState == AbortablePreclean, "incorrect state");
4673
ResourceMark rm;
4674
HandleMark hm;
4675
4676
// Precleaning is currently not MT but the reference processor
4677
// may be set for MT. Disable it temporarily here.
4678
ReferenceProcessor* rp = ref_processor();
4679
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4680
4681
// Do one pass of scrubbing the discovered reference lists
4682
// to remove any reference objects with strongly-reachable
4683
// referents.
4684
if (clean_refs) {
4685
CMSPrecleanRefsYieldClosure yield_cl(this);
4686
assert(rp->span().equals(_span), "Spans should be equal");
4687
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4688
&_markStack, true /* preclean */);
4689
CMSDrainMarkingStackClosure complete_trace(this,
4690
_span, &_markBitMap, &_markStack,
4691
&keep_alive, true /* preclean */);
4692
4693
// We don't want this step to interfere with a young
4694
// collection because we don't want to take CPU
4695
// or memory bandwidth away from the young GC threads
4696
// (which may be as many as there are CPUs).
4697
// Note that we don't need to protect ourselves from
4698
// interference with mutators because they can't
4699
// manipulate the discovered reference lists nor affect
4700
// the computed reachability of the referents, the
4701
// only properties manipulated by the precleaning
4702
// of these reference lists.
4703
stopTimer();
4704
CMSTokenSyncWithLocks x(true /* is cms thread */,
4705
bitMapLock());
4706
startTimer();
4707
sample_eden();
4708
4709
// The following will yield to allow foreground
4710
// collection to proceed promptly. XXX YSR:
4711
// The code in this method may need further
4712
// tweaking for better performance and some restructuring
4713
// for cleaner interfaces.
4714
GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4715
rp->preclean_discovered_references(
4716
rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4717
gc_timer, _gc_tracer_cm->gc_id());
4718
}
4719
4720
if (clean_survivor) { // preclean the active survivor space(s)
4721
assert(_young_gen->kind() == Generation::DefNew ||
4722
_young_gen->kind() == Generation::ParNew ||
4723
_young_gen->kind() == Generation::ASParNew,
4724
"incorrect type for cast");
4725
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4726
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4727
&_markBitMap, &_modUnionTable,
4728
&_markStack, true /* precleaning phase */);
4729
stopTimer();
4730
CMSTokenSyncWithLocks ts(true /* is cms thread */,
4731
bitMapLock());
4732
startTimer();
4733
unsigned int before_count =
4734
GenCollectedHeap::heap()->total_collections();
4735
SurvivorSpacePrecleanClosure
4736
sss_cl(this, _span, &_markBitMap, &_markStack,
4737
&pam_cl, before_count, CMSYield);
4738
dng->from()->object_iterate_careful(&sss_cl);
4739
dng->to()->object_iterate_careful(&sss_cl);
4740
}
4741
MarkRefsIntoAndScanClosure
4742
mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4743
&_markStack, this, CMSYield,
4744
true /* precleaning phase */);
4745
// CAUTION: The following closure has persistent state that may need to
4746
// be reset upon a decrease in the sequence of addresses it
4747
// processes.
4748
ScanMarkedObjectsAgainCarefullyClosure
4749
smoac_cl(this, _span,
4750
&_markBitMap, &_markStack, &mrias_cl, CMSYield);
4751
4752
// Preclean dirty cards in ModUnionTable and CardTable using
4753
// appropriate convergence criterion;
4754
// repeat CMSPrecleanIter times unless we find that
4755
// we are losing.
4756
assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4757
assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4758
"Bad convergence multiplier");
4759
assert(CMSPrecleanThreshold >= 100,
4760
"Unreasonably low CMSPrecleanThreshold");
4761
4762
size_t numIter, cumNumCards, lastNumCards, curNumCards;
4763
for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4764
numIter < CMSPrecleanIter;
4765
numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4766
curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4767
if (Verbose && PrintGCDetails) {
4768
gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4769
}
4770
// Either there are very few dirty cards, so re-mark
4771
// pause will be small anyway, or our pre-cleaning isn't
4772
// that much faster than the rate at which cards are being
4773
// dirtied, so we might as well stop and re-mark since
4774
// precleaning won't improve our re-mark time by much.
4775
if (curNumCards <= CMSPrecleanThreshold ||
4776
(numIter > 0 &&
4777
(curNumCards * CMSPrecleanDenominator >
4778
lastNumCards * CMSPrecleanNumerator))) {
4779
numIter++;
4780
cumNumCards += curNumCards;
4781
break;
4782
}
4783
}
4784
4785
preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4786
4787
curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4788
cumNumCards += curNumCards;
4789
if (PrintGCDetails && PrintCMSStatistics != 0) {
4790
gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4791
curNumCards, cumNumCards, numIter);
4792
}
4793
return cumNumCards; // as a measure of useful work done
4794
}
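
// Illustrative sketch, not part of HotSpot: the convergence test that ends
// the precleaning iterations above. Precleaning stops either when few dirty
// cards remain (threshold ~ CMSPrecleanThreshold) or when the card count is
// no longer shrinking fast enough, i.e. when cur/last > numerator/denominator
// (with numerator < denominator), compared here in the same cross-multiplied,
// division-free form used by the loop above.
static bool sketch_preclean_converged(size_t cur_cards,
                                      size_t last_cards,
                                      size_t threshold,
                                      size_t numerator,
                                      size_t denominator,
                                      bool   first_iteration) {
  if (cur_cards <= threshold) return true;                  // remark will be cheap anyway
  if (first_iteration)        return false;                 // no previous count to compare against
  return cur_cards * denominator > last_cards * numerator;  // losing the race with the mutators
}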
4795
4796
// PRECLEANING NOTES:
4797
// Precleaning involves:
4798
// . reading the bits of the modUnionTable and clearing the set bits.
4799
// . For the cards corresponding to the set bits, we scan the
4800
// objects on those cards. This means we need the free_list_lock
4801
// so that we can safely iterate over the CMS space when scanning
4802
// for oops.
4803
// . When we scan the objects, we'll be both reading and setting
4804
// marks in the marking bit map, so we'll need the marking bit map.
4805
// . For protecting _collector_state transitions, we take the CGC_lock.
4806
// Note that any races in the reading of card table entries by the
// CMS thread on the one hand and the clearing of those entries by the
// VM thread or the setting of those entries by the mutator threads on the
// other are quite benign. However, for efficiency it makes sense to keep
// the VM thread from racing with the CMS thread while the latter is
// recording dirty card info to the modUnionTable. We therefore also use the
// CGC_lock to protect the reading of the card table and the mod union
// table by the CMS thread.
4814
// . We run concurrently with mutator updates, so scanning
4815
// needs to be done carefully -- we should not try to scan
4816
// potentially uninitialized objects.
4817
//
4818
// Locking strategy: While holding the CGC_lock, we scan over and
4819
// reset a maximal dirty range of the mod union / card tables, then lock
4820
// the free_list_lock and bitmap lock to do a full marking, then
4821
// release these locks; and repeat the cycle. This allows for a
4822
// certain amount of fairness in the sharing of these locks between
4823
// the CMS collector on the one hand, and the VM thread and the
4824
// mutators on the other.
4825
4826
// NOTE: preclean_mod_union_table() and preclean_card_table()
4827
// further below are largely identical; if you need to modify
4828
// one of these methods, please check the other method too.
4829
4830
size_t CMSCollector::preclean_mod_union_table(
4831
ConcurrentMarkSweepGeneration* gen,
4832
ScanMarkedObjectsAgainCarefullyClosure* cl) {
4833
verify_work_stacks_empty();
4834
verify_overflow_empty();
4835
4836
// strategy: starting with the first card, accumulate contiguous
4837
// ranges of dirty cards; clear these cards, then scan the region
4838
// covered by these cards.
4839
4840
// Since all of the MUT is committed ahead, we can just use
4841
// that, in case the generations expand while we are precleaning.
4842
// It might also be fine to just use the committed part of the
4843
// generation, but we might potentially miss cards when the
4844
// generation is rapidly expanding while we are in the midst
4845
// of precleaning.
4846
HeapWord* startAddr = gen->reserved().start();
4847
HeapWord* endAddr = gen->reserved().end();
4848
4849
cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4850
4851
size_t numDirtyCards, cumNumDirtyCards;
4852
HeapWord *nextAddr, *lastAddr;
4853
for (cumNumDirtyCards = numDirtyCards = 0,
4854
nextAddr = lastAddr = startAddr;
4855
nextAddr < endAddr;
4856
nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4857
4858
ResourceMark rm;
4859
HandleMark hm;
4860
4861
MemRegion dirtyRegion;
4862
{
4863
stopTimer();
4864
// Potential yield point
4865
CMSTokenSync ts(true);
4866
startTimer();
4867
sample_eden();
4868
// Get dirty region starting at nextAddr (inclusive),
4869
// simultaneously clearing it.
4870
dirtyRegion =
4871
_modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4872
assert(dirtyRegion.start() >= nextAddr,
4873
"returned region inconsistent?");
4874
}
4875
// Remember where the next search should begin.
4876
// The returned region (if non-empty) is a right open interval,
4877
// so lastAddr is obtained from the right end of that
4878
// interval.
4879
lastAddr = dirtyRegion.end();
4880
// Should do something more transparent and less hacky XXX
4881
numDirtyCards =
4882
_modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4883
4884
// We'll scan the cards in the dirty region (with periodic
4885
// yields for foreground GC as needed).
4886
if (!dirtyRegion.is_empty()) {
4887
assert(numDirtyCards > 0, "consistency check");
4888
HeapWord* stop_point = NULL;
4889
stopTimer();
4890
// Potential yield point
4891
CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4892
bitMapLock());
4893
startTimer();
4894
{
4895
verify_work_stacks_empty();
4896
verify_overflow_empty();
4897
sample_eden();
4898
stop_point =
4899
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4900
}
4901
if (stop_point != NULL) {
4902
// The careful iteration stopped early either because it found an
4903
// uninitialized object, or because we were in the midst of an
4904
// "abortable preclean", which should now be aborted. Redirty
4905
// the bits corresponding to the partially-scanned or unscanned
4906
// cards. We'll either restart at the next block boundary or
4907
// abort the preclean.
4908
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4909
"Should only be AbortablePreclean.");
4910
_modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4911
if (should_abort_preclean()) {
4912
break; // out of preclean loop
4913
} else {
4914
// Compute the next address at which preclean should pick up;
4915
// might need bitMapLock in order to read P-bits.
4916
lastAddr = next_card_start_after_block(stop_point);
4917
}
4918
}
4919
} else {
4920
assert(lastAddr == endAddr, "consistency check");
4921
assert(numDirtyCards == 0, "consistency check");
4922
break;
4923
}
4924
}
4925
verify_work_stacks_empty();
4926
verify_overflow_empty();
4927
return cumNumDirtyCards;
4928
}
4929
4930
// NOTE: preclean_mod_union_table() above and preclean_card_table()
4931
// below are largely identical; if you need to modify
4932
// one of these methods, please check the other method too.
4933
4934
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4935
ScanMarkedObjectsAgainCarefullyClosure* cl) {
4936
// strategy: it's similar to preclean_mod_union_table above, in that
// we accumulate contiguous ranges of dirty cards, mark these cards
// precleaned, then scan the region covered by these cards.
4939
HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4940
HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4941
4942
cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4943
4944
size_t numDirtyCards, cumNumDirtyCards;
4945
HeapWord *lastAddr, *nextAddr;
4946
4947
for (cumNumDirtyCards = numDirtyCards = 0,
4948
nextAddr = lastAddr = startAddr;
4949
nextAddr < endAddr;
4950
nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4951
4952
ResourceMark rm;
4953
HandleMark hm;
4954
4955
MemRegion dirtyRegion;
4956
{
4957
// See comments in "Precleaning notes" above on why we
4958
// do this locking. XXX Could the locking overheads be
4959
// too high when dirty cards are sparse? [I don't think so.]
4960
stopTimer();
4961
CMSTokenSync x(true); // is cms thread
4962
startTimer();
4963
sample_eden();
4964
// Get and clear dirty region from card table
4965
dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4966
MemRegion(nextAddr, endAddr),
4967
true,
4968
CardTableModRefBS::precleaned_card_val());
4969
4970
assert(dirtyRegion.start() >= nextAddr,
4971
"returned region inconsistent?");
4972
}
4973
lastAddr = dirtyRegion.end();
4974
numDirtyCards =
4975
dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4976
4977
if (!dirtyRegion.is_empty()) {
4978
stopTimer();
4979
CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4980
startTimer();
4981
sample_eden();
4982
verify_work_stacks_empty();
4983
verify_overflow_empty();
4984
HeapWord* stop_point =
4985
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4986
if (stop_point != NULL) {
4987
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4988
"Should only be AbortablePreclean.");
4989
_ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4990
if (should_abort_preclean()) {
4991
break; // out of preclean loop
4992
} else {
4993
// Compute the next address at which preclean should pick up.
4994
lastAddr = next_card_start_after_block(stop_point);
4995
}
4996
}
4997
} else {
4998
break;
4999
}
5000
}
5001
verify_work_stacks_empty();
5002
verify_overflow_empty();
5003
return cumNumDirtyCards;
5004
}
5005
5006
class PrecleanKlassClosure : public KlassClosure {
5007
KlassToOopClosure _cm_klass_closure;
5008
public:
5009
PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5010
void do_klass(Klass* k) {
5011
if (k->has_accumulated_modified_oops()) {
5012
k->clear_accumulated_modified_oops();
5013
5014
_cm_klass_closure.do_klass(k);
5015
}
5016
}
5017
};
5018
5019
// The freelist lock is needed to prevent asserts; is it really needed?
5020
void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5021
5022
cl->set_freelistLock(freelistLock);
5023
5024
CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5025
5026
// SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5027
// SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5028
PrecleanKlassClosure preclean_klass_closure(cl);
5029
ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5030
5031
verify_work_stacks_empty();
5032
verify_overflow_empty();
5033
}
5034
5035
void CMSCollector::checkpointRootsFinal(bool asynch,
5036
bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5037
assert(_collectorState == FinalMarking, "incorrect state transition?");
5038
check_correct_thread_executing();
5039
// world is stopped at this checkpoint
5040
assert(SafepointSynchronize::is_at_safepoint(),
5041
"world should be stopped");
5042
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5043
5044
verify_work_stacks_empty();
5045
verify_overflow_empty();
5046
5047
SpecializationStats::clear();
5048
if (PrintGCDetails) {
5049
gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
5050
_young_gen->used() / K,
5051
_young_gen->capacity() / K);
5052
}
5053
if (asynch) {
5054
if (CMSScavengeBeforeRemark) {
5055
GenCollectedHeap* gch = GenCollectedHeap::heap();
5056
// Temporarily set the flag to false; GCH->do_collection() expects it
// to be false and will set it back to true.
5058
FlagSetting fl(gch->_is_gc_active, false);
5059
NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5060
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5061
int level = _cmsGen->level() - 1;
5062
if (level >= 0) {
5063
gch->do_collection(true, // full (i.e. force, see below)
5064
false, // !clear_all_soft_refs
5065
0, // size
5066
false, // is_tlab
5067
level // max_level
5068
);
5069
}
5070
}
5071
FreelistLocker x(this);
5072
MutexLockerEx y(bitMapLock(),
5073
Mutex::_no_safepoint_check_flag);
5074
assert(!init_mark_was_synchronous, "but that's impossible!");
5075
checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5076
_cmsGen->cmsSpace()->recalculate_used_stable();
5077
} else {
5078
// already have all the locks
5079
checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5080
init_mark_was_synchronous);
5081
_cmsGen->cmsSpace()->recalculate_used_stable();
5082
}
5083
verify_work_stacks_empty();
5084
verify_overflow_empty();
5085
SpecializationStats::print();
5086
}
5087
5088
void CMSCollector::checkpointRootsFinalWork(bool asynch,
5089
bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5090
5091
NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5092
5093
assert(haveFreelistLocks(), "must have free list locks");
5094
assert_lock_strong(bitMapLock());
5095
5096
if (UseAdaptiveSizePolicy) {
5097
size_policy()->checkpoint_roots_final_begin();
5098
}
5099
5100
ResourceMark rm;
5101
HandleMark hm;
5102
5103
GenCollectedHeap* gch = GenCollectedHeap::heap();
5104
5105
if (should_unload_classes()) {
5106
CodeCache::gc_prologue();
5107
}
5108
assert(haveFreelistLocks(), "must have free list locks");
5109
assert_lock_strong(bitMapLock());
5110
5111
if (!init_mark_was_synchronous) {
5112
// We might assume that we need not fill TLAB's when
5113
// CMSScavengeBeforeRemark is set, because we may have just done
5114
// a scavenge which would have filled all TLAB's -- and besides
5115
// Eden would be empty. This however may not always be the case --
5116
// for instance although we asked for a scavenge, it may not have
5117
// happened because of a JNI critical section. We probably need
5118
// a policy for deciding whether we can in that case wait until
5119
// the critical section releases and then do the remark following
5120
// the scavenge, and skip it here. In the absence of that policy,
5121
// or of an indication of whether the scavenge did indeed occur,
5122
// we cannot rely on TLAB's having been filled and must do
5123
// so here just in case a scavenge did not happen.
5124
gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
5125
// Update the saved marks which may affect the root scans.
5126
gch->save_marks();
5127
5128
if (CMSPrintEdenSurvivorChunks) {
5129
print_eden_and_survivor_chunk_arrays();
5130
}
5131
5132
{
5133
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5134
5135
// Note on the role of the mod union table:
5136
// Since the marker in "markFromRoots" marks concurrently with
5137
// mutators, it is possible for some reachable objects not to have been
5138
// scanned. For instance, an only reference to an object A was
5139
// placed in object B after the marker scanned B. Unless B is rescanned,
5140
// A would be collected. Such updates to references in marked objects
5141
// are detected via the mod union table which is the set of all cards
5142
// dirtied since the first checkpoint in this GC cycle and prior to
5143
// the most recent young generation GC, minus those cleaned up by the
5144
// concurrent precleaning.
5145
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5146
GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5147
do_remark_parallel();
5148
} else {
5149
GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5150
_gc_timer_cm, _gc_tracer_cm->gc_id());
5151
do_remark_non_parallel();
5152
}
5153
}
5154
} else {
5155
assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5156
// The initial mark was stop-world, so there's no rescanning to
5157
// do; go straight on to the next step below.
5158
}
5159
verify_work_stacks_empty();
5160
verify_overflow_empty();
5161
5162
{
5163
NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5164
refProcessingWork(asynch, clear_all_soft_refs);
5165
}
5166
verify_work_stacks_empty();
5167
verify_overflow_empty();
5168
5169
if (should_unload_classes()) {
5170
CodeCache::gc_epilogue();
5171
}
5172
JvmtiExport::gc_epilogue();
5173
5174
// If we encountered any (marking stack / work queue) overflow
5175
// events during the current CMS cycle, take appropriate
5176
// remedial measures, where possible, so as to try and avoid
5177
// recurrence of that condition.
5178
assert(_markStack.isEmpty(), "No grey objects");
5179
size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5180
_ser_kac_ovflw + _ser_kac_preclean_ovflw;
5181
if (ser_ovflw > 0) {
5182
if (PrintCMSStatistics != 0) {
5183
gclog_or_tty->print_cr("Marking stack overflow (benign) "
5184
"(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
5185
", kac_preclean=" SIZE_FORMAT ")",
5186
_ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5187
_ser_kac_ovflw, _ser_kac_preclean_ovflw);
5188
}
5189
_markStack.expand();
5190
_ser_pmc_remark_ovflw = 0;
5191
_ser_pmc_preclean_ovflw = 0;
5192
_ser_kac_preclean_ovflw = 0;
5193
_ser_kac_ovflw = 0;
5194
}
5195
if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5196
if (PrintCMSStatistics != 0) {
5197
gclog_or_tty->print_cr("Work queue overflow (benign) "
5198
"(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
5199
_par_pmc_remark_ovflw, _par_kac_ovflw);
5200
}
5201
_par_pmc_remark_ovflw = 0;
5202
_par_kac_ovflw = 0;
5203
}
5204
if (PrintCMSStatistics != 0) {
5205
if (_markStack._hit_limit > 0) {
5206
gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
5207
_markStack._hit_limit);
5208
}
5209
if (_markStack._failed_double > 0) {
5210
gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
5211
" current capacity " SIZE_FORMAT,
5212
_markStack._failed_double,
5213
_markStack.capacity());
5214
}
5215
}
5216
_markStack._hit_limit = 0;
5217
_markStack._failed_double = 0;
5218
5219
if ((VerifyAfterGC || VerifyDuringGC) &&
5220
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5221
verify_after_remark();
5222
}
5223
5224
_gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5225
5226
// Change under the freelistLocks.
5227
_collectorState = Sweeping;
5228
// Call isAllClear() under bitMapLock
5229
assert(_modUnionTable.isAllClear(),
5230
"Should be clear by end of the final marking");
5231
assert(_ct->klass_rem_set()->mod_union_is_clear(),
5232
"Should be clear by end of the final marking");
5233
if (UseAdaptiveSizePolicy) {
5234
size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5235
}
5236
}
5237
5238
void CMSParInitialMarkTask::work(uint worker_id) {
5239
elapsedTimer _timer;
5240
ResourceMark rm;
5241
HandleMark hm;
5242
5243
// ---------- scan from roots --------------
5244
_timer.start();
5245
GenCollectedHeap* gch = GenCollectedHeap::heap();
5246
Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5247
5248
// ---------- young gen roots --------------
5249
{
5250
work_on_young_gen_roots(worker_id, &par_mri_cl);
5251
_timer.stop();
5252
if (PrintCMSStatistics != 0) {
5253
gclog_or_tty->print_cr(
5254
"Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5255
worker_id, _timer.seconds());
5256
}
5257
}
5258
5259
// ---------- remaining roots --------------
5260
_timer.reset();
5261
_timer.start();
5262
5263
CLDToOopClosure cld_closure(&par_mri_cl, true);
5264
5265
gch->gen_process_roots(_collector->_cmsGen->level(),
5266
false, // yg was scanned above
5267
false, // this is parallel code
5268
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5269
_collector->should_unload_classes(),
5270
&par_mri_cl,
5271
NULL,
5272
&cld_closure);
5273
assert(_collector->should_unload_classes()
5274
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5275
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5276
_timer.stop();
5277
if (PrintCMSStatistics != 0) {
5278
gclog_or_tty->print_cr(
5279
"Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5280
worker_id, _timer.seconds());
5281
}
5282
}
5283
5284
// Parallel remark task
5285
class CMSParRemarkTask: public CMSParMarkTask {
5286
CompactibleFreeListSpace* _cms_space;
5287
5288
// The per-thread work queues, available here for stealing.
5289
OopTaskQueueSet* _task_queues;
5290
ParallelTaskTerminator _term;
5291
5292
public:
5293
// A value of 0 passed to n_workers will cause the number of
5294
// workers to be taken from the active workers in the work gang.
5295
CMSParRemarkTask(CMSCollector* collector,
5296
CompactibleFreeListSpace* cms_space,
5297
int n_workers, FlexibleWorkGang* workers,
5298
OopTaskQueueSet* task_queues):
5299
CMSParMarkTask("Rescan roots and grey objects in parallel",
5300
collector, n_workers),
5301
_cms_space(cms_space),
5302
_task_queues(task_queues),
5303
_term(n_workers, task_queues) { }
5304
5305
OopTaskQueueSet* task_queues() { return _task_queues; }
5306
5307
OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5308
5309
ParallelTaskTerminator* terminator() { return &_term; }
5310
int n_workers() { return _n_workers; }
5311
5312
void work(uint worker_id);
5313
5314
private:
5315
// ... of dirty cards in old space
5316
void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5317
Par_MarkRefsIntoAndScanClosure* cl);
5318
5319
// ... work stealing for the above
5320
void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5321
};
5322
5323
class RemarkKlassClosure : public KlassClosure {
5324
KlassToOopClosure _cm_klass_closure;
5325
public:
5326
RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5327
void do_klass(Klass* k) {
5328
// Check if we have modified any oops in the Klass during the concurrent marking.
5329
if (k->has_accumulated_modified_oops()) {
5330
k->clear_accumulated_modified_oops();
5331
5332
// We could have transferred the current modified marks to the accumulated marks,
// like we do with the Card Table to Mod Union Table. But it's not really necessary.
5334
} else if (k->has_modified_oops()) {
5335
// Don't clear anything, this info is needed by the next young collection.
5336
} else {
5337
// No modified oops in the Klass.
5338
return;
5339
}
5340
5341
// The klass has modified fields, need to scan the klass.
5342
_cm_klass_closure.do_klass(k);
5343
}
5344
};
5345
5346
void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5347
DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5348
EdenSpace* eden_space = dng->eden();
5349
ContiguousSpace* from_space = dng->from();
5350
ContiguousSpace* to_space = dng->to();
5351
5352
HeapWord** eca = _collector->_eden_chunk_array;
5353
size_t ect = _collector->_eden_chunk_index;
5354
HeapWord** sca = _collector->_survivor_chunk_array;
5355
size_t sct = _collector->_survivor_chunk_index;
5356
5357
assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5358
assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5359
5360
do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5361
do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5362
do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5363
}
5364
5365
// work_queue(i) is passed to the closure
5366
// Par_MarkRefsIntoAndScanClosure. The "i" parameter
5367
// also is passed to do_dirty_card_rescan_tasks() and to
5368
// do_work_steal() to select the i-th task_queue.
5369
5370
void CMSParRemarkTask::work(uint worker_id) {
5371
elapsedTimer _timer;
5372
ResourceMark rm;
5373
HandleMark hm;
5374
5375
// ---------- rescan from roots --------------
5376
_timer.start();
5377
GenCollectedHeap* gch = GenCollectedHeap::heap();
5378
Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5379
_collector->_span, _collector->ref_processor(),
5380
&(_collector->_markBitMap),
5381
work_queue(worker_id));
5382
5383
// Rescan young gen roots first since these are likely
5384
// coarsely partitioned and may, on that account, constitute
5385
// the critical path; thus, it's best to start off that
5386
// work first.
5387
// ---------- young gen roots --------------
5388
{
5389
work_on_young_gen_roots(worker_id, &par_mrias_cl);
5390
_timer.stop();
5391
if (PrintCMSStatistics != 0) {
5392
gclog_or_tty->print_cr(
5393
"Finished young gen rescan work in %dth thread: %3.3f sec",
5394
worker_id, _timer.seconds());
5395
}
5396
}
5397
5398
// ---------- remaining roots --------------
5399
_timer.reset();
5400
_timer.start();
5401
gch->gen_process_roots(_collector->_cmsGen->level(),
5402
false, // yg was scanned above
5403
false, // this is parallel code
5404
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5405
_collector->should_unload_classes(),
5406
&par_mrias_cl,
5407
NULL,
5408
NULL); // The dirty klasses will be handled below
5409
5410
assert(_collector->should_unload_classes()
5411
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5412
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5413
_timer.stop();
5414
if (PrintCMSStatistics != 0) {
5415
gclog_or_tty->print_cr(
5416
"Finished remaining root rescan work in %dth thread: %3.3f sec",
5417
worker_id, _timer.seconds());
5418
}
5419
5420
// ---------- unhandled CLD scanning ----------
5421
if (worker_id == 0) { // Single threaded at the moment.
5422
_timer.reset();
5423
_timer.start();
5424
5425
// Scan all new class loader data objects and new dependencies that were
5426
// introduced during concurrent marking.
5427
ResourceMark rm;
5428
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5429
for (int i = 0; i < array->length(); i++) {
5430
par_mrias_cl.do_class_loader_data(array->at(i));
5431
}
5432
5433
// We don't need to keep track of new CLDs anymore.
5434
ClassLoaderDataGraph::remember_new_clds(false);
5435
5436
_timer.stop();
5437
if (PrintCMSStatistics != 0) {
5438
gclog_or_tty->print_cr(
5439
"Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5440
worker_id, _timer.seconds());
5441
}
5442
}
5443
5444
// ---------- dirty klass scanning ----------
5445
if (worker_id == 0) { // Single threaded at the moment.
5446
_timer.reset();
5447
_timer.start();
5448
5449
// Scan all classes that were dirtied during the concurrent marking phase.
5450
RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5451
ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5452
5453
_timer.stop();
5454
if (PrintCMSStatistics != 0) {
5455
gclog_or_tty->print_cr(
5456
"Finished dirty klass scanning work in %dth thread: %3.3f sec",
5457
worker_id, _timer.seconds());
5458
}
5459
}
5460
5461
// We might have added oops to ClassLoaderData::_handles during the
5462
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive, either by the direct allocation
// code or by the young collector when it processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
5466
5467
// ---------- rescan dirty cards ------------
5468
_timer.reset();
5469
_timer.start();
5470
5471
// Do the rescan tasks for each of the two spaces
5472
// (cms_space) in turn.
5473
// "worker_id" is passed to select the task_queue for "worker_id"
5474
do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5475
_timer.stop();
5476
if (PrintCMSStatistics != 0) {
5477
gclog_or_tty->print_cr(
5478
"Finished dirty card rescan work in %dth thread: %3.3f sec",
5479
worker_id, _timer.seconds());
5480
}
5481
5482
// ---------- steal work from other threads ...
5483
// ---------- ... and drain overflow list.
5484
_timer.reset();
5485
_timer.start();
5486
do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5487
_timer.stop();
5488
if (PrintCMSStatistics != 0) {
5489
gclog_or_tty->print_cr(
5490
"Finished work stealing in %dth thread: %3.3f sec",
5491
worker_id, _timer.seconds());
5492
}
5493
}
5494
5495
// Note that the worker_id parameter is not used.
5496
void
5497
CMSParMarkTask::do_young_space_rescan(uint worker_id,
5498
OopsInGenClosure* cl, ContiguousSpace* space,
5499
HeapWord** chunk_array, size_t chunk_top) {
5500
// Until all tasks completed:
5501
// . claim an unclaimed task
5502
// . compute region boundaries corresponding to task claimed
5503
// using chunk_array
5504
// . par_oop_iterate(cl) over that region
5505
5506
ResourceMark rm;
5507
HandleMark hm;
5508
5509
SequentialSubTasksDone* pst = space->par_seq_tasks();
5510
5511
uint nth_task = 0;
5512
uint n_tasks = pst->n_tasks();
5513
5514
if (n_tasks > 0) {
5515
assert(pst->valid(), "Uninitialized use?");
5516
HeapWord *start, *end;
5517
while (!pst->is_task_claimed(/* reference */ nth_task)) {
5518
// We claimed task # nth_task; compute its boundaries.
5519
if (chunk_top == 0) { // no samples were taken
5520
assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5521
start = space->bottom();
5522
end = space->top();
5523
} else if (nth_task == 0) {
5524
start = space->bottom();
5525
end = chunk_array[nth_task];
5526
} else if (nth_task < (uint)chunk_top) {
5527
assert(nth_task >= 1, "Control point invariant");
5528
start = chunk_array[nth_task - 1];
5529
end = chunk_array[nth_task];
5530
} else {
5531
assert(nth_task == (uint)chunk_top, "Control point invariant");
5532
start = chunk_array[chunk_top - 1];
5533
end = space->top();
5534
}
5535
MemRegion mr(start, end);
5536
// Verify that mr is in space
5537
assert(mr.is_empty() || space->used_region().contains(mr),
5538
"Should be in space");
5539
// Verify that "start" is an object boundary
5540
assert(mr.is_empty() || oop(mr.start())->is_oop(),
5541
"Should be an oop");
5542
space->par_oop_iterate(mr, cl);
5543
}
5544
pst->all_tasks_completed();
5545
}
5546
}
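
// Illustrative sketch, not part of HotSpot: how a claimed task index is
// turned into [start, end) boundaries from the sampled chunk array in
// do_young_space_rescan() above, using plain size_t "addresses" so the
// arithmetic stands alone. chunk_array is assumed to hold chunk_top sampled
// boundaries in increasing order between space_bottom and space_top.
static void sketch_task_boundaries(size_t nth_task,
                                   const size_t* chunk_array, size_t chunk_top,
                                   size_t space_bottom, size_t space_top,
                                   size_t* start, size_t* end) {
  if (chunk_top == 0) {                    // no samples: a single task covers the space
    *start = space_bottom; *end = space_top;
  } else if (nth_task == 0) {              // first task: bottom up to the first sample
    *start = space_bottom; *end = chunk_array[0];
  } else if (nth_task < chunk_top) {       // middle tasks: between consecutive samples
    *start = chunk_array[nth_task - 1]; *end = chunk_array[nth_task];
  } else {                                 // last task: final sample up to top
    *start = chunk_array[chunk_top - 1]; *end = space_top;
  }
}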
5547
5548
void
5549
CMSParRemarkTask::do_dirty_card_rescan_tasks(
5550
CompactibleFreeListSpace* sp, int i,
5551
Par_MarkRefsIntoAndScanClosure* cl) {
5552
// Until all tasks completed:
5553
// . claim an unclaimed task
5554
// . compute region boundaries corresponding to task claimed
5555
// . transfer dirty bits ct->mut for that region
5556
// . apply rescanclosure to dirty mut bits for that region
5557
5558
ResourceMark rm;
5559
HandleMark hm;
5560
5561
OopTaskQueue* work_q = work_queue(i);
5562
ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5563
// CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5564
// CAUTION: This closure has state that persists across calls to
5565
// the work method dirty_range_iterate_clear() in that it has
5566
// embedded in it a (subtype of) UpwardsObjectClosure. The
// use of that state in the embedded UpwardsObjectClosure instance
5568
// assumes that the cards are always iterated (even if in parallel
5569
// by several threads) in monotonically increasing order per each
5570
// thread. This is true of the implementation below which picks
5571
// card ranges (chunks) in monotonically increasing order globally
5572
// and, a-fortiori, in monotonically increasing order per thread
5573
// (the latter order being a subsequence of the former).
5574
// If the work code below is ever reorganized into a more chaotic
5575
// work-partitioning form than the current "sequential tasks"
5576
// paradigm, the use of that persistent state will have to be
5577
// revisited and modified appropriately. See also related
5578
// bug 4756801 work on which should examine this code to make
5579
// sure that the changes there do not run counter to the
5580
// assumptions made here and necessary for correctness and
5581
// efficiency. Note also that this code might yield inefficient
5582
// behaviour in the case of very large objects that span one or
5583
// more work chunks. Such objects would potentially be scanned
5584
// several times redundantly. Work on 4756801 should try and
5585
// address that performance anomaly if at all possible. XXX
5586
MemRegion full_span = _collector->_span;
5587
CMSBitMap* bm = &(_collector->_markBitMap); // shared
5588
MarkFromDirtyCardsClosure
5589
greyRescanClosure(_collector, full_span, // entire span of interest
5590
sp, bm, work_q, cl);
5591
5592
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5593
assert(pst->valid(), "Uninitialized use?");
5594
uint nth_task = 0;
5595
const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5596
MemRegion span = sp->used_region();
5597
HeapWord* start_addr = span.start();
5598
HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5599
alignment);
5600
const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5601
assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5602
start_addr, "Check alignment");
5603
assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5604
chunk_size, "Check alignment");
5605
5606
while (!pst->is_task_claimed(/* reference */ nth_task)) {
5607
// Having claimed the nth_task, compute corresponding mem-region,
5608
// which is a-fortiori aligned correctly (i.e. at a MUT boundary).
// The alignment restriction ensures that we do not need any
// synchronization with other gang-workers while setting or
// clearing bits in this chunk of the MUT.
5612
MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5613
start_addr + (nth_task+1)*chunk_size);
5614
// The last chunk's end might be way beyond the end of the
// used region. In that case pull back appropriately.
5616
if (this_span.end() > end_addr) {
5617
this_span.set_end(end_addr);
5618
assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5619
}
5620
// Iterate over the dirty cards covering this chunk, marking them
5621
// precleaned, and setting the corresponding bits in the mod union
5622
// table. Since we have been careful to partition at Card and MUT-word
5623
// boundaries no synchronization is needed between parallel threads.
5624
_collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5625
&modUnionClosure);
5626
5627
// Having transferred these marks into the modUnionTable,
5628
// rescan the marked objects on the dirty cards in the modUnionTable.
5629
// Even if this is at a synchronous collection, the initial marking
5630
// may have been done during an asynchronous collection so there
5631
// may be dirty bits in the mod-union table.
5632
_collector->_modUnionTable.dirty_range_iterate_clear(
5633
this_span, &greyRescanClosure);
5634
_collector->_modUnionTable.verifyNoOneBitsInRange(
5635
this_span.start(),
5636
this_span.end());
5637
}
5638
pst->all_tasks_completed(); // declare that i am done
5639
}
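
// Illustrative sketch, not part of HotSpot: the aligned chunk computation
// used when rescanning dirty cards above. Each task owns
// [start + n*chunk, start + (n+1)*chunk), clipped to the (aligned-up) end of
// the used region, so parallel workers never touch the same MUT words.
static void sketch_dirty_card_chunk(size_t nth_task,
                                    size_t start_addr,   // assumed chunk-aligned
                                    size_t end_addr,     // used-region end, aligned up
                                    size_t chunk_size,   // assumed multiple of the alignment
                                    size_t* chunk_start, size_t* chunk_end) {
  *chunk_start = start_addr + nth_task * chunk_size;
  *chunk_end   = start_addr + (nth_task + 1) * chunk_size;
  if (*chunk_end > end_addr) {
    *chunk_end = end_addr;                 // the last chunk may extend past the used region
  }
}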
5640
5641
// . see if we can share work_queues with ParNew? XXX
5642
void
5643
CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5644
int* seed) {
5645
OopTaskQueue* work_q = work_queue(i);
5646
NOT_PRODUCT(int num_steals = 0;)
5647
oop obj_to_scan;
5648
CMSBitMap* bm = &(_collector->_markBitMap);
5649
5650
while (true) {
5651
// Completely finish any left over work from (an) earlier round(s)
5652
cl->trim_queue(0);
5653
size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5654
(size_t)ParGCDesiredObjsFromOverflowList);
5655
// Now check if there's any work in the overflow list
5656
// Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5657
// only affects the number of attempts made to get work from the
5658
// overflow list and does not affect the number of workers. Just
5659
// pass ParallelGCThreads so this behavior is unchanged.
5660
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5661
work_q,
5662
ParallelGCThreads)) {
5663
// found something in global overflow list;
5664
// not yet ready to go stealing work from others.
5665
// We'd like to assert(work_q->size() != 0, ...)
5666
// because we just took work from the overflow list,
5667
// but of course we can't since all of that could have
5668
// been already stolen from us.
5669
// "He giveth and He taketh away."
5670
continue;
5671
}
5672
// Verify that we have no work before we resort to stealing
5673
assert(work_q->size() == 0, "Have work, shouldn't steal");
5674
// Try to steal from other queues that have work
5675
if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5676
NOT_PRODUCT(num_steals++;)
5677
assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5678
assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5679
// Do scanning work
5680
obj_to_scan->oop_iterate(cl);
5681
// Loop around, finish this work, and try to steal some more
5682
} else if (terminator()->offer_termination()) {
5683
break; // nirvana from the infinite cycle
5684
}
5685
}
5686
NOT_PRODUCT(
5687
if (PrintCMSStatistics != 0) {
5688
gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5689
}
5690
)
5691
assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5692
"Else our work is not yet done");
5693
}
5694
5695
// If CMSEdenChunksRecordAlways is true, record object boundaries in
// _eden_chunk_array by sampling the eden top in the slow-path eden
// object allocation code path. If CMSEdenChunksRecordAlways is false,
// we instead use the asynchronous sampling in sample_eden(), which is
// active during part of the preclean phase.
5701
void CMSCollector::sample_eden_chunk() {
5702
if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5703
if (_eden_chunk_lock->try_lock()) {
5704
// Record a sample. This is the critical section. The contents
5705
// of the _eden_chunk_array have to be non-decreasing in the
5706
// address order.
5707
_eden_chunk_array[_eden_chunk_index] = *_top_addr;
5708
assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5709
"Unexpected state of Eden");
5710
if (_eden_chunk_index == 0 ||
5711
((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5712
(pointer_delta(_eden_chunk_array[_eden_chunk_index],
5713
_eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5714
_eden_chunk_index++; // commit sample
5715
}
5716
_eden_chunk_lock->unlock();
5717
}
5718
}
5719
}
5720
5721
// Return a thread-local PLAB recording array, as appropriate.
5722
void* CMSCollector::get_data_recorder(int thr_num) {
5723
if (_survivor_plab_array != NULL &&
5724
(CMSPLABRecordAlways ||
5725
(_collectorState > Marking && _collectorState < FinalMarking))) {
5726
assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5727
ChunkArray* ca = &_survivor_plab_array[thr_num];
5728
ca->reset(); // clear it so that fresh data is recorded
5729
return (void*) ca;
5730
} else {
5731
return NULL;
5732
}
5733
}
5734
5735
// Reset all the thread-local PLAB recording arrays
5736
void CMSCollector::reset_survivor_plab_arrays() {
5737
for (uint i = 0; i < ParallelGCThreads; i++) {
5738
_survivor_plab_array[i].reset();
5739
}
5740
}
5741
5742
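// The loop below is effectively a k-way merge over the per-thread PLAB
// sample arrays, each of which is recorded in increasing address order:
// every round selects the smallest still-unconsumed address across all
// threads and appends it to _survivor_chunk_array. For instance, two
// threads recording {a0, a2} and {a1, a3} (with a0 < a1 < a2 < a3) yield
// the merged, sorted chunk array {a0, a1, a2, a3}.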
// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
// for CMS initial scan and rescan.
void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
                                              int no_of_gc_threads) {
  assert(_survivor_plab_array  != NULL, "Error");
  assert(_survivor_chunk_array != NULL, "Error");
  assert(_collectorState == FinalMarking ||
         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
  for (int j = 0; j < no_of_gc_threads; j++) {
    _cursor[j] = 0;
  }
  HeapWord* top = surv->top();
  size_t i;
  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
    HeapWord* min_val = top;          // Higher than any PLAB address
    uint      min_tid = 0;            // position of min_val this round
    for (int j = 0; j < no_of_gc_threads; j++) {
      ChunkArray* cur_sca = &_survivor_plab_array[j];
      if (_cursor[j] == cur_sca->end()) {
        continue;
      }
      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
      assert(surv->used_region().contains(cur_val), "Out of bounds value");
      if (cur_val < min_val) {
        min_tid = j;
        min_val = cur_val;
      } else {
        assert(cur_val < top, "All recorded addresses should be less");
      }
    }
    // At this point min_val and min_tid are respectively
    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
    // and the thread (j) that witnesses that address.
    // We record this address in the _survivor_chunk_array[i]
    // and increment _cursor[min_tid] prior to the next round i.
    if (min_val == top) {
      break;
    }
    _survivor_chunk_array[i] = min_val;
    _cursor[min_tid]++;
  }
  // We are all done; record the size of the _survivor_chunk_array
  _survivor_chunk_index = i;  // exclusive: [0, i)
  if (PrintCMSStatistics > 0) {
    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
  }
  // Verify that we used up all the recorded entries
  #ifdef ASSERT
    size_t total = 0;
    for (int j = 0; j < no_of_gc_threads; j++) {
      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
      total += _cursor[j];
    }
    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
    // Check that the merged array is in sorted order
    if (total > 0) {
      for (size_t i = 0; i < total - 1; i++) {
        if (PrintCMSStatistics > 0) {
          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
                              i, _survivor_chunk_array[i]);
        }
        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
               "Not sorted");
      }
    }
  #endif // ASSERT
}

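// The task counts chosen below follow directly from the sampling above:
// the _eden_chunk_index recorded boundaries split eden into
// _eden_chunk_index + 1 intervals, and the merged survivor chunk array
// splits from-space into _survivor_chunk_index + 1 intervals; each
// interval becomes one claimable rescan task.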
// Set up the space's par_seq_tasks structure for work claiming
// for parallel initial scan and rescan of young gen.
// See ParRescanTask where this is currently used.
void
CMSCollector::
initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
  assert(n_threads > 0, "Unexpected n_threads argument");
  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;

  // Eden space
  if (!dng->eden()->is_empty()) {
    SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    // Each valid entry in [0, _eden_chunk_index) represents a task.
    size_t n_tasks = _eden_chunk_index + 1;
    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
    // Sets the condition for completion of the subtask (how many threads
    // need to finish in order to be done).
    pst->set_n_threads(n_threads);
    pst->set_n_tasks((int)n_tasks);
  }

  // Merge the survivor plab arrays into _survivor_chunk_array
  if (_survivor_plab_array != NULL) {
    merge_survivor_plab_arrays(dng->from(), n_threads);
  } else {
    assert(_survivor_chunk_index == 0, "Error");
  }

  // To space
  {
    SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    // Sets the condition for completion of the subtask (how many threads
    // need to finish in order to be done).
    pst->set_n_threads(n_threads);
    pst->set_n_tasks(1);
    assert(pst->valid(), "Error");
  }

  // From space
  {
    SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    size_t n_tasks = _survivor_chunk_index + 1;
    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
    // Sets the condition for completion of the subtask (how many threads
    // need to finish in order to be done).
    pst->set_n_threads(n_threads);
    pst->set_n_tasks((int)n_tasks);
    assert(pst->valid(), "Error");
  }
}

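// The parallel remark below splits the work three ways: young gen rescan
// via the eden/survivor chunk tasks set up above, dirty card rescan of the
// CMS space via its sequential subtasks, and residual marking work that is
// balanced across the workers through their task queues and the
// work-stealing loop of CMSParRemarkTask.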
// Parallel version of remark
void CMSCollector::do_remark_parallel() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  // Choose to use the number of GC workers most recently set
  // into "active_workers". If active_workers is not set, set it
  // to ParallelGCThreads.
  int n_workers = workers->active_workers();
  if (n_workers == 0) {
    assert(n_workers > 0, "Should have been set during scavenge");
    n_workers = ParallelGCThreads;
    workers->set_active_workers(n_workers);
  }
  CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();

  CMSParRemarkTask tsk(this,
                       cms_space,
                       n_workers, workers, task_queues());

  // Set up for parallel process_roots work.
  gch->set_par_threads(n_workers);
  // We won't be iterating over the cards in the card table updating
  // the younger_gen cards, so we shouldn't call the following else
  // the verification code as well as subsequent younger_refs_iterate
  // code would get confused. XXX
  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel

  // The young gen rescan work will not be done as part of
  // process_roots (which currently doesn't know how to
  // parallelize such a scan), but rather will be broken up into
  // a set of parallel tasks (via the sampling that the [abortable]
  // preclean phase did of EdenSpace), plus the [two] tasks of
  // scanning the [two] survivor spaces. Further fine-grain
  // parallelization of the scanning of the survivor spaces
  // themselves, and of precleaning of the younger gen itself
  // is deferred to the future.
  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);

  // The dirty card rescan work is broken up into a "sequence"
  // of parallel tasks (per constituent space) that are dynamically
  // claimed by the parallel threads.
  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);

  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    // Make refs discovery MT-safe, if it isn't already: it may not
    // necessarily be so, since it's possible that we are doing
    // ST marking.
    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }

  gch->set_par_threads(0);  // 0 ==> non-parallel.
  // restore, single-threaded for now, any preserved marks
  // as a result of work_q overflow
  restore_preserved_marks_if_any();
}

// Non-parallel version of remark
void CMSCollector::do_remark_non_parallel() {
  ResourceMark rm;
  HandleMark   hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);

  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
             &_markStack, this,
             false /* should_yield */, false /* not precleaning */);
  MarkFromDirtyCardsClosure
    markFromDirtyCardsClosure(this, _span,
                              NULL,  // space is set further below
                              &_markBitMap, &_markStack, &mrias_cl);
  {
    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
    // Iterate over the dirty cards, setting the corresponding bits in the
    // mod union table.
    {
      ModUnionClosure modUnionClosure(&_modUnionTable);
      _ct->ct_bs()->dirty_card_iterate(
                      _cmsGen->used_region(),
                      &modUnionClosure);
    }
    // Having transferred these marks into the modUnionTable, we just need
    // to rescan the marked objects on the dirty cards in the modUnionTable.
    // The initial marking may have been done during an asynchronous
    // collection so there may be dirty bits in the mod-union table.
    const int alignment =
      CardTableModRefBS::card_size * BitsPerWord;
    {
      // ... First handle dirty cards in CMS gen
      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
      MemRegion ur = _cmsGen->used_region();
      HeapWord* lb = ur.start();
      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
      MemRegion cms_span(lb, ub);
      _modUnionTable.dirty_range_iterate_clear(cms_span,
                                               &markFromDirtyCardsClosure);
      verify_work_stacks_empty();
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
                            markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
  }
  if (VerifyDuringGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify();
  }
  {
    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    verify_work_stacks_empty();

    gch->rem_set()->prepare_for_younger_refs_iterate(false);  // Not parallel.
    GenCollectedHeap::StrongRootsScope srs(gch);

    gch->gen_process_roots(_cmsGen->level(),
                           true,   // younger gens as roots
                           false,  // use the local StrongRootsScope
                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
                           should_unload_classes(),
                           &mrias_cl,
                           NULL,
                           NULL);  // The dirty klasses will be handled below

    assert(should_unload_classes()
           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  }

  {
    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    verify_work_stacks_empty();

    // Scan all class loader data objects that might have been introduced
    // during concurrent marking.
    ResourceMark rm;
    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
    for (int i = 0; i < array->length(); i++) {
      mrias_cl.do_class_loader_data(array->at(i));
    }

    // We don't need to keep track of new CLDs anymore.
    ClassLoaderDataGraph::remember_new_clds(false);

    verify_work_stacks_empty();
  }

  {
    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    verify_work_stacks_empty();

    RemarkKlassClosure remark_klass_closure(&mrias_cl);
    ClassLoaderDataGraph::classes_do(&remark_klass_closure);

    verify_work_stacks_empty();
  }

  // We might have added oops to ClassLoaderData::_handles during the
  // concurrent marking phase. These oops point to newly allocated objects
  // that are guaranteed to be kept alive, either by the direct allocation
  // code or when the young collector processes the roots. Hence,
  // we don't have to revisit the _handles block during the remark phase.

  verify_work_stacks_empty();
  // Restore evacuated mark words, if any, used for overflow list links
  if (!CMSOverflowEarlyRestoration) {
    restore_preserved_marks_if_any();
  }
  verify_overflow_empty();
}

////////////////////////////////////////////////////////
// Parallel Reference Processing Task Proxy Class
////////////////////////////////////////////////////////
class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  CMSCollector*          _collector;
  CMSBitMap*             _mark_bit_map;
  const MemRegion        _span;
  ProcessTask&           _task;

 public:
  CMSRefProcTaskProxy(ProcessTask&     task,
                      CMSCollector*    collector,
                      const MemRegion& span,
                      CMSBitMap*       mark_bit_map,
                      AbstractWorkGang* workers,
                      OopTaskQueueSet* task_queues):
    // XXX Should superclass AGTWOQ also know about AWG since it knows
    // about the task_queues used by the AWG? Then it could initialize
    // the terminator() object. See 6984287. The set_for_termination()
    // below is a temporary band-aid for the regression in 6984287.
    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
                               task_queues),
    _task(task),
    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
  {
    assert(_collector->_span.equals(_span) && !_span.is_empty(),
           "Inconsistency in _span");
    set_for_termination(workers->active_workers());
  }

  OopTaskQueueSet* task_queues() { return queues(); }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  void do_work_steal(int i,
                     CMSParDrainMarkingStackClosure* drain,
                     CMSParKeepAliveClosure* keep_alive,
                     int* seed);

  virtual void work(uint worker_id);
};

void CMSRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  assert(_collector->_span.equals(_span), "Inconsistency in _span");
  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                        _mark_bit_map,
                                        work_queue(worker_id));
  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
                                                 _mark_bit_map,
                                                 work_queue(worker_id));
  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
  if (_task.marks_oops_alive()) {
    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
                  _collector->hash_seed(worker_id));
  }
  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
}

class CMSRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

 public:
  CMSRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("Enqueue reference objects in parallel"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};

CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _mark_and_push(collector, span, bit_map, work_queue),
   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ }

// . see if we can share work_queues with ParNew? XXX
void CMSRefProcTaskProxy::do_work_steal(int i,
  CMSParDrainMarkingStackClosure* drain,
  CMSParKeepAliveClosure* keep_alive,
  int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    drain->trim_queue(0);
    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                         (size_t)ParGCDesiredObjsFromOverflowList);
    // Now check if there's any work in the overflow list
    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
    // only affects the number of attempts made to get work from the
    // overflow list and does not affect the number of workers. Just
    // pass ParallelGCThreads so this behavior is unchanged.
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q,
                                                ParallelGCThreads)) {
      // Found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't, since all of that might have
      // been already stolen from us.
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(keep_alive);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
}

void CMSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefProcTaskProxy rp_task(task, &_collector,
                              _collector.ref_processor()->span(),
                              _collector.markBitMap(),
                              workers, _collector.task_queues());
  workers->run_task(&rp_task);
}

void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

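// Reference processing at remark proceeds in three steps: process the
// discovered references (using CMSRefProcTaskExecutor above when the
// reference processor is configured for MT processing), then, if class
// unloading is in effect, scrub classes, nmethods, symbols and interned
// strings, and finally enqueue the surviving discovered references.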
void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {

  ResourceMark rm;
  HandleMark   hm;

  ReferenceProcessor* rp = ref_processor();
  assert(rp->span().equals(_span), "Spans should be equal");
  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  rp->setup_policy(clear_all_soft_refs);
  verify_work_stacks_empty();

  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                          &_markStack, false /* !preclean */);
  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                _span, &_markBitMap, &_markStack,
                                &cmsKeepAliveClosure, false /* !preclean */);
  {
    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

    ReferenceProcessorStats stats;
    if (rp->processing_is_mt()) {
      // Set the degree of MT here. If the discovery is done MT, there
      // may have been a different number of threads doing the discovery
      // and a different number of discovered lists may have Ref objects.
      // That is OK as long as the Reference lists are balanced (see
      // balance_all_queues() and balance_queues()).
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      int active_workers = ParallelGCThreads;
      FlexibleWorkGang* workers = gch->workers();
      if (workers != NULL) {
        active_workers = workers->active_workers();
        // The expectation is that active_workers will have already
        // been set to a reasonable value. If it has not been set,
        // investigate.
        assert(active_workers > 0, "Should have been set during scavenge");
      }
      rp->set_active_mt_degree(active_workers);
      CMSRefProcTaskExecutor task_executor(*this);
      stats = rp->process_discovered_references(&_is_alive_closure,
                                                &cmsKeepAliveClosure,
                                                &cmsDrainMarkingStackClosure,
                                                &task_executor,
                                                _gc_timer_cm,
                                                _gc_tracer_cm->gc_id());
    } else {
      stats = rp->process_discovered_references(&_is_alive_closure,
                                                &cmsKeepAliveClosure,
                                                &cmsDrainMarkingStackClosure,
                                                NULL,
                                                _gc_timer_cm,
                                                _gc_tracer_cm->gc_id());
    }
    _gc_tracer_cm->report_gc_reference_stats(stats);

  }

  // This is the point where the entire marking should have completed.
  verify_work_stacks_empty();

  if (should_unload_classes()) {
    {
      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());

      // Unload classes and purge the SystemDictionary.
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Unload nmethods.
      CodeCache::do_unloading(&_is_alive_closure, purged_class);

      // Prune dead klasses from subklass/sibling/implementor lists.
      Klass::clean_weak_klass_links(&_is_alive_closure);
    }

    {
      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
      // Clean up unreferenced symbols in symbol table.
      SymbolTable::unlink();
    }

    {
      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
      // Delete entries for dead interned strings.
      StringTable::unlink(&_is_alive_closure);
    }
  }


  // Restore any preserved marks as a result of mark stack or
  // work queue overflow
  restore_preserved_marks_if_any();  // done single-threaded for now

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    rp->balance_all_queues();
    CMSRefProcTaskExecutor task_executor(*this);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");
}

#ifndef PRODUCT
void CMSCollector::check_correct_thread_executing() {
  Thread* t = Thread::current();
  // Only the VM thread or the CMS thread should be here.
  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
         "Unexpected thread type");
  // If this is the vm thread, the foreground process
  // should not be waiting. Note that _foregroundGCIsActive is
  // true while the foreground collector is waiting.
  if (_foregroundGCShouldWait) {
    // We cannot be the VM thread
    assert(t->is_ConcurrentGC_thread(),
           "Should be CMS thread");
  } else {
    // We can be the CMS thread only if we are in a stop-world
    // phase of CMS collection.
    if (t->is_ConcurrentGC_thread()) {
      assert(_collectorState == InitialMarking ||
             _collectorState == FinalMarking,
             "Should be a stop-world phase");
      // The CMS thread should be holding the CMS_token.
      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
             "Potential interference with concurrently "
             "executing VM thread");
    }
  }
}
#endif

void CMSCollector::sweep(bool asynch) {
  assert(_collectorState == Sweeping, "just checking");
  check_correct_thread_executing();
  verify_work_stacks_empty();
  verify_overflow_empty();
  increment_sweep_count();
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());

  _inter_sweep_timer.stop();
  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());

  assert(!_intra_sweep_timer.is_active(), "Should not be active");
  _intra_sweep_timer.reset();
  _intra_sweep_timer.start();
  if (asynch) {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
    // First sweep the old gen
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               bitMapLock());
      sweepWork(_cmsGen, asynch);
    }

    // Update Universe::_heap_*_at_gc figures.
    // We need all the free list locks to make the abstract state
    // transition from Sweeping to Resizing. See detailed note
    // further below.
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();

      // recalculate CMS used space after CMS collection
      _cmsGen->cmsSpace()->recalculate_used_stable();

      _collectorState = Resizing;
    }
  } else {
    // already have needed locks
    sweepWork(_cmsGen, asynch);
    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();
    _collectorState = Resizing;
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  if (should_unload_classes()) {
    // Delay purge to the beginning of the next safepoint. Metaspace::contains
    // requires that the virtual spaces are stable and not deleted.
    ClassLoaderDataGraph::set_should_purge(true);
  }

  _intra_sweep_timer.stop();
  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());

  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  // NOTE on abstract state transitions:
  // Mutators allocate-live and/or mark the mod-union table dirty
  // based on the state of the collection. The former is done in
  // the interval [Marking, Sweeping] and the latter in the interval
  // [Marking, Sweeping). Thus the transitions into the Marking state
  // and out of the Sweeping state must be synchronously visible
  // globally to the mutators.
  // The transition into the Marking state happens with the world
  // stopped so the mutators will globally see it. Sweeping is
  // done asynchronously by the background collector so the transition
  // from the Sweeping state to the Resizing state must be done
  // under the freelistLock (as is the check for whether to
  // allocate-live and whether to dirty the mod-union table).
  assert(_collectorState == Resizing, "Change of collector state to"
    " Resizing must be done under the freelistLocks (plural)");

  // Now that sweeping has been completed, we clear
  // the incremental_collection_failed flag,
  // thus inviting a younger gen collection to promote into
  // this generation. If such a promotion may still fail,
  // the flag will be set again when a young collection is
  // attempted.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
  gch->update_full_collections_completed(_collection_count_start);
}

// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
  HeapWord*  minAddr        = _cmsSpace->bottom();
  HeapWord*  largestAddr    =
    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
  if (largestAddr == NULL) {
    // The dictionary appears to be empty. In this case
    // try to coalesce at the end of the heap.
    largestAddr = _cmsSpace->end();
  }
  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
  size_t nearLargestOffset =
    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  if (PrintFLSStatistics != 0) {
    gclog_or_tty->print_cr(
      "CMS: Large Block: " PTR_FORMAT ";"
      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
      largestAddr,
      _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
  }
  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
}

bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
  return addr >= _cmsSpace->nearLargestChunk();
}

FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
  return _cmsSpace->find_chunk_at_end();
}

void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
                                                    bool full) {
  // The next lower level has been collected. Gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Gather statistics on the young generation collection.
    collector()->stats().record_gc0_end(used());
  }
  _cmsSpace->recalculate_used_stable();
}

CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
  }
  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
  _debug_collection_type =
    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
  }
}

void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
                             bool asynch) {
  // We iterate over the space(s) underlying this generation,
  // checking the mark bit map to see if the bits corresponding
  // to specific blocks are marked or not. Blocks that are
  // marked are live and are not swept up. All remaining blocks
  // are swept up, with coalescing on-the-fly as we sweep up
  // contiguous free and/or garbage blocks:
  // We need to ensure that the sweeper synchronizes with allocators
  // and stop-the-world collectors. In particular, the following
  // locks are used:
  // . CMS token: if this is held, a stop-the-world collection cannot occur
  // . freelistLock: if this is held no allocation can occur from this
  //                 generation by another thread
  // . bitMapLock: if this is held, no other thread can access or update
  //               the mark bit map

  // Note that we need to hold the freelistLock if we use
  // block iterate below; else the iterator might go awry if
  // a mutator (or promotion) causes block contents to change
  // (for instance if the allocator divvies up a block).
  // If we hold the free list lock, for all practical purposes
  // young generation GC's can't occur (they'll usually need to
  // promote), so we might as well prevent all young generation
  // GC's while we do a sweeping step. For the same reason, we might
  // as well take the bit map lock for the entire duration.

  // check that we hold the requisite locks
  assert(have_cms_token(), "Should hold cms token");
  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
         "Should possess CMS token to sweep");
  assert_lock_strong(gen->freelistLock());
  assert_lock_strong(bitMapLock());

  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
  assert(_intra_sweep_timer.is_active(),  "Was switched on in an outer context");
  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                      _inter_sweep_estimate.padded_average(),
                                      _intra_sweep_estimate.padded_average());
  gen->setNearLargestChunk();

  {
    SweepClosure sweepClosure(this, gen, &_markBitMap,
                              CMSYield && asynch);
    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
    // We need to free-up/coalesce garbage/blocks from a
    // co-terminal free run. This is done in the SweepClosure
    // destructor; so, do not remove this scope, else the
    // end-of-sweep-census below will be off by a little bit.
  }
  gen->cmsSpace()->sweep_completed();
  gen->cmsSpace()->endSweepFLCensus(sweep_count());
  if (should_unload_classes()) {                // unloaded classes this cycle,
    _concurrent_cycles_since_last_unload = 0;   // ... reset count
  } else {                                      // did not unload classes,
    _concurrent_cycles_since_last_unload++;     // ... increment count
  }
}

// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool asynch) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* sp = size_policy();
  AdaptiveSizePolicyOutput(sp, gch->total_collections());
  if (asynch) {
    CMSTokenSyncWithLocks ts(true, bitMapLock());

    // If the state is not "Resetting", the foreground thread
    // has done a collection and the resetting.
    if (_collectorState != Resetting) {
      assert(_collectorState == Idling, "The state should only change"
        " because the foreground collector has finished the collection");
      return;
    }

    // Clear the mark bitmap (no grey objects to start with)
    // for the next cycle.
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);

    HeapWord* curAddr = _markBitMap.startWord();
    while (curAddr < _markBitMap.endWord()) {
      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
      _markBitMap.clear_large_range(chunk);
      if (ConcurrentMarkSweepThread::should_yield() &&
          !foregroundGCIsActive() &&
          CMSYield) {
        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
               "CMS thread should hold CMS token");
        assert_lock_strong(bitMapLock());
        bitMapLock()->unlock();
        ConcurrentMarkSweepThread::desynchronize(true);
        ConcurrentMarkSweepThread::acknowledge_yield_request();
        stopTimer();
        if (PrintCMSStatistics != 0) {
          incrementYields();
        }
        icms_wait();

        // See the comment in coordinator_yield()
        for (unsigned i = 0; i < CMSYieldSleepCount &&
                             ConcurrentMarkSweepThread::should_yield() &&
                             !CMSCollector::foregroundGCIsActive(); ++i) {
          os::sleep(Thread::current(), 1, false);
          ConcurrentMarkSweepThread::acknowledge_yield_request();
        }

        ConcurrentMarkSweepThread::synchronize(true);
        bitMapLock()->lock_without_safepoint_check();
        startTimer();
      }
      curAddr = chunk.end();
    }
    // A successful mostly concurrent collection has been done.
    // Because only the full (i.e., concurrent mode failure) collections
    // are being measured for gc overhead limits, clean the "near" flag
    // and count.
    sp->reset_gc_overhead_limit_count();
    _collectorState = Idling;
  } else {
    // already have the lock
    assert(_collectorState == Resetting, "just checking");
    assert_lock_strong(bitMapLock());
    _markBitMap.clear_all();
    _collectorState = Idling;
  }

  // Stop incremental mode after a cycle completes, so that any future cycles
  // are triggered by allocation.
  stop_icms();

  NOT_PRODUCT(
    if (RotateCMSCollectionTypes) {
      _cmsGen->rotate_debug_collection_type();
    }
  )

  register_gc_end();
}

void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
  TraceCollectorStats tcs(counters());

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      SvcGCMarker sgcm(SvcGCMarker::OTHER);
      checkpointRootsInitial(true);       // asynch
      if (PrintGC) {
        _cmsGen->printOccupancy("initial-mark");
      }
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      SvcGCMarker sgcm(SvcGCMarker::OTHER);
      checkpointRootsFinal(true,    // asynch
                           false,   // !clear_all_soft_refs
                           false);  // !init_mark_was_synchronous
      if (PrintGC) {
        _cmsGen->printOccupancy("remark");
      }
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}

#ifndef PRODUCT
size_t const CMSCollector::skip_header_HeapWords() {
  return FreeChunk::header_size();
}

// Try and collect here conditions that should hold when
// CMS thread is exiting. The idea is that the foreground GC
// thread should not be blocked if it wants to terminate
// the CMS thread and yet continue to run the VM for a while
// after that.
void CMSCollector::verify_ok_to_terminate() const {
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "should be called by CMS thread");
  assert(!_foregroundGCShouldWait, "should be false");
  // We could check here that all the various low-level locks
  // are not held by the CMS thread, but that is overkill; see
  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
  // is checked.
}
#endif

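// Printezis marks encode the size of a block whose header may not yet be
// initialized: bits are set at 'addr' and 'addr + 1', and a further bit is
// set at the last word of the block. A sketch of the computation done
// below, assuming those bits have been set as described:
//
//   HeapWord* last = _markBitMap.getNextMarkedWordAddress(addr + 2);
//   size_t    size = pointer_delta(last + 1, addr);  // at least 3 words
//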
size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
         "missing Printezis mark?");
  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  size_t size = pointer_delta(nextOneAddr + 1, addr);
  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
         "alignment problem");
  assert(size >= 3, "Necessary for Printezis marks to work");
  return size;
}

// A variant of the above (block_size_using_printezis_bits()) except
// that we return 0 if the P-bits are not yet set.
size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
  if (_markBitMap.isMarked(addr + 1)) {
    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
    size_t size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");
    assert(size >= 3, "Necessary for Printezis marks to work");
    return size;
  }
  return 0;
}

HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
  size_t sz = 0;
  oop p = (oop)addr;
  if (p->klass_or_null_acquire() != NULL) {
    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
  } else {
    sz = block_size_using_printezis_bits(addr);
  }
  assert(sz > 0, "size must be nonzero");
  HeapWord* next_block = addr + sz;
  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
                                             CardTableModRefBS::card_size);
  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
         "must be different cards");
  return next_card;
}


// CMS Bit Map Wrapper /////////////////////////////////////////

// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call to
// CMSBitMap::allocate() further below.
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
  _bm(),
  _shifter(shifter),
  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
{
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

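// Sizing note for allocate() below: with one mark bit per (1 << _shifter)
// heap words, the backing store needs
// (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes, plus one byte for
// rounding. For instance, with _shifter == 0 on a 64-bit VM that is one
// bit per 8-byte word, i.e. roughly 1/64 of the covered region.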
bool CMSBitMap::allocate(MemRegion mr) {
  _bmStartWord = mr.start();
  _bmWordSize  = mr.word_size();
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("CMS bit map allocation failure");
    return false;
  }
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("CMS bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of CMS bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);

  // bm.clear(); // can we rely on getting zero'd memory? verify below
  assert(isAllClear(),
         "Expected zero'd memory from ReservedSpace constructor");
  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
         "consistency check");
  return true;
}

void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
  HeapWord *next_addr, *end_addr, *last_addr;
  assert_locked();
  assert(covers(mr), "out-of-range error");
  // XXX assert that start and end are appropriately aligned
  for (next_addr = mr.start(), end_addr = mr.end();
       next_addr < end_addr; next_addr = last_addr) {
    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
    last_addr = dirty_region.end();
    if (!dirty_region.is_empty()) {
      cl->do_MemRegion(dirty_region);
    } else {
      assert(last_addr == end_addr, "program logic");
      return;
    }
  }
}

void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

#ifndef PRODUCT
void CMSBitMap::assert_locked() const {
  CMSLockVerifier::assert_locked(lock());
}

bool CMSBitMap::covers(MemRegion mr) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
         "size inconsistency");
  return (mr.start() >= _bmStartWord) &&
         (mr.end()   <= endWord());
}

bool CMSBitMap::covers(HeapWord* start, size_t size) const {
    return (start >= _bmStartWord && (start + size) <= endWord());
}

void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
  // verify that there are no 1 bits in the interval [left, right)
  FalseBitMapClosure falseBitMapClosure;
  iterate(&falseBitMapClosure, left, right);
}

void CMSBitMap::region_invariant(MemRegion mr)
{
  assert_locked();
  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert(covers(mr), "mr should be covered by bit map");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  // Make sure that end() is appropriately aligned
  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
                        ((intptr_t) 1 << (_shifter+LogHeapWordSize))),
         "Misaligned mr.end()");
  size_t end_ofs   = heapWordToOffset(mr.end());
  assert(end_ofs > start_ofs, "Should mark at least one bit");
}

#endif

bool CMSMarkStack::allocate(size_t size) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                    size * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("CMSMarkStack allocation failure");
    return false;
  }
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("CMSMarkStack backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "didn't reserve backing store for all of CMS stack?");
  _base = (oop*)(_virtual_space.low());
  _index = 0;
  _capacity = size;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
void CMSMarkStack::expand() {
  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
  if (_capacity == MarkStackSizeMax) {
    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
      // We print a warning message only once per CMS cycle.
      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
    }
    return;
  }
  // Double capacity if possible
  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                    new_capacity * sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
    // Failed to double capacity, continue;
    // we print a detail message only once per CMS cycle.
    gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
                        SIZE_FORMAT "K",
                        _capacity / K, new_capacity / K);
  }
}


// Closures
// XXX: there seems to be a lot of code duplication here;
// should refactor and consolidate common code.

// This closure is used to mark refs into the CMS generation in
// the CMS bit map. Called at the first checkpoint. This closure
// assumes that we do not need to re-mark dirty cards; if the CMS
// generation on which this is used is not an oldest
// generation then this will lose younger_gen cards!

MarkRefsIntoClosure::MarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
    assert(_ref_processor == NULL, "deliberately left NULL");
    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

void MarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->mark(addr);
  }
}

void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }

Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
    assert(_ref_processor == NULL, "deliberately left NULL");
    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

void Par_MarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->par_mark(addr);
  }
}

void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }

// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
    _span(span),
    _verification_bm(verification_bm),
    _cms_bm(cms_bm)
{
    assert(_ref_processor == NULL, "deliberately left NULL");
    assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}

void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    _verification_bm->mark(addr);
    if (!_cms_bm->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
      fatal("... aborting");
    }
  }
}

void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }

//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////

MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
                                                       ReferenceProcessor* rp,
                                                       CMSBitMap* bit_map,
                                                       CMSBitMap* mod_union_table,
                                                       CMSMarkStack*  mark_stack,
                                                       CMSCollector* collector,
                                                       bool should_yield,
                                                       bool concurrent_precleaning):
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
                      mark_stack, concurrent_precleaning),
  _yield(should_yield),
  _concurrent_precleaning(concurrent_precleaning),
  _freelistLock(NULL)
{
  _ref_processor = rp;
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    assert(obj->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object is now grey)
      _bit_map->mark(addr);
      // push on marking stack (stack should be empty), and drain the
      // stack by applying this closure to the oops in the oops popped
      // from the stack (i.e. blacken the grey objects)
      bool res = _mark_stack->push(obj);
      assert(res, "Should have space to push on empty stack");
      do {
        oop new_oop = _mark_stack->pop();
        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
        assert(_bit_map->isMarked((HeapWord*)new_oop),
               "only grey objects on this stack");
        // iterate over the oops in this oop, marking and pushing
        // the ones in CMS heap (i.e. in _span).
        new_oop->oop_iterate(&_pushAndMarkClosure);
        // check if it's time to yield
        do_yield_check();
      } while (!_mark_stack->isEmpty() ||
               (!_concurrent_precleaning && take_from_overflow_list()));
        // if marking stack is empty, and we are not doing this
        // during precleaning, then check the overflow list
    }
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list was drained above");
    // We could restore evacuated mark words, if any, used for
    // overflow list links here because the overflow list is
    // provably empty here. That would reduce the maximum
    // size requirements for preserved_{oop,mark}_stack.
    // But we'll just postpone it until we are all done
    // so we can just stream through.
    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
      _collector->restore_preserved_marks_if_any();
      assert(_collector->no_preserved_marks(), "No preserved marks");
    }
    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
           "All preserved marks should have been restored above");
  }
}

void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

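// The yield protocol below follows the pattern described in
// coordinator_yield(): release the free list and bit map locks,
// desynchronize so that a foreground collection can proceed, sleep in
// short increments while the yield request persists, then resynchronize
// and retake both locks before resuming.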
void MarkRefsIntoAndScanClosure::do_yield_work() {
7085
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7086
"CMS thread should hold CMS token");
7087
assert_lock_strong(_freelistLock);
7088
assert_lock_strong(_bit_map->lock());
7089
// relinquish the free_list_lock and bitMaplock()
7090
_bit_map->lock()->unlock();
7091
_freelistLock->unlock();
7092
ConcurrentMarkSweepThread::desynchronize(true);
7093
ConcurrentMarkSweepThread::acknowledge_yield_request();
7094
_collector->stopTimer();
7095
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7096
if (PrintCMSStatistics != 0) {
7097
_collector->incrementYields();
7098
}
7099
_collector->icms_wait();
7100
7101
// See the comment in coordinator_yield()
7102
for (unsigned i = 0;
7103
i < CMSYieldSleepCount &&
7104
ConcurrentMarkSweepThread::should_yield() &&
7105
!CMSCollector::foregroundGCIsActive();
7106
++i) {
7107
os::sleep(Thread::current(), 1, false);
7108
ConcurrentMarkSweepThread::acknowledge_yield_request();
7109
}
7110
7111
ConcurrentMarkSweepThread::synchronize(true);
7112
_freelistLock->lock_without_safepoint_check();
7113
_bit_map->lock()->lock_without_safepoint_check();
7114
_collector->startTimer();
7115
}
7116
7117
///////////////////////////////////////////////////////////
7118
// Par_MarkRefsIntoAndScanClosure: a parallel version of
7119
// MarkRefsIntoAndScanClosure
7120
///////////////////////////////////////////////////////////
7121
Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7122
CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7123
CMSBitMap* bit_map, OopTaskQueue* work_queue):
7124
_span(span),
7125
_bit_map(bit_map),
7126
_work_queue(work_queue),
7127
_low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7128
(uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7129
_par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7130
{
7131
_ref_processor = rp;
7132
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7133
}
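// Illustrative arithmetic for the low-water mark above (all values are
// assumed for the sake of the sketch, not taken from this build): with a
// work queue capacity of, say, 16384 entries, ParallelGCThreads == 8 and
// CMSWorkQueueDrainThreshold == 10, the mark works out to
// MIN2(16384/4, 10*8) == 80, so do_oop() below trims the queue back to
// about 80 entries after each push instead of letting it fill up.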
7134
7135
// This closure is used to mark refs into the CMS generation at the
7136
// second (final) checkpoint, and to scan and transitively follow
7137
// the unmarked oops. The marks are made in the marking bit map and
7138
// the work_queue is used for keeping the (newly) grey objects during
7139
// the scan phase whence they are also available for stealing by parallel
7140
// threads. Since the marking bit map is shared, updates are
7141
// synchronized (via CAS).
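// A sketch of the claim protocol implied above (editorial illustration,
// not part of the original commentary): if two workers race on the same
// unmarked object, both may see it as white, but only the worker whose
// par_mark(addr) CAS succeeds pushes the object on its work queue; the
// loser simply moves on, so each newly greyed object is queued exactly
// once even though several threads discovered it.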
7142
void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7143
if (obj != NULL) {
7144
// Ignore mark word because this could be an already marked oop
7145
// that may be chained at the end of the overflow list.
7146
assert(obj->is_oop(true), "expected an oop");
7147
HeapWord* addr = (HeapWord*)obj;
7148
if (_span.contains(addr) &&
7149
!_bit_map->isMarked(addr)) {
7150
// mark bit map (object will become grey):
7151
// It is possible for several threads to be
7152
// trying to "claim" this object concurrently;
7153
// the unique thread that succeeds in marking the
7154
// object first will do the subsequent push on
7155
// to the work queue (or overflow list).
7156
if (_bit_map->par_mark(addr)) {
7157
// push on work_queue (which may not be empty), and trim the
7158
// queue to an appropriate length by applying this closure to
7159
// the oops in the oops popped from the stack (i.e. blacken the
7160
// grey objects)
7161
bool res = _work_queue->push(obj);
7162
assert(res, "Low water mark should be less than capacity?");
7163
trim_queue(_low_water_mark);
7164
} // Else, another thread claimed the object
7165
}
7166
}
7167
}
7168
7169
void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7170
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7171
7172
// This closure is used to rescan the marked objects on the dirty cards
7173
// in the mod union table and the card table proper.
7174
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7175
oop p, MemRegion mr) {
7176
7177
size_t size = 0;
7178
HeapWord* addr = (HeapWord*)p;
7179
DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7180
assert(_span.contains(addr), "we are scanning the CMS generation");
7181
// check if it's time to yield
7182
if (do_yield_check()) {
7183
// We yielded for some foreground stop-world work,
7184
// and we have been asked to abort this ongoing preclean cycle.
7185
return 0;
7186
}
7187
if (_bitMap->isMarked(addr)) {
7188
// it's marked; is it potentially uninitialized?
7189
if (p->klass_or_null_acquire() != NULL) {
7190
// an initialized object; ignore mark word in verification below
7191
// since we are running concurrent with mutators
7192
assert(p->is_oop(true), "should be an oop");
7193
if (p->is_objArray()) {
7194
// objArrays are precisely marked; restrict scanning
7195
// to dirty cards only.
7196
size = CompactibleFreeListSpace::adjustObjectSize(
7197
p->oop_iterate(_scanningClosure, mr));
7198
} else {
7199
// A non-array may have been imprecisely marked; we need
7200
// to scan object in its entirety.
7201
size = CompactibleFreeListSpace::adjustObjectSize(
7202
p->oop_iterate(_scanningClosure));
7203
}
7204
#ifdef ASSERT
7205
size_t direct_size =
7206
CompactibleFreeListSpace::adjustObjectSize(p->size());
7207
assert(size == direct_size, "Inconsistency in size");
7208
assert(size >= 3, "Necessary for Printezis marks to work");
7209
if (!_bitMap->isMarked(addr+1)) {
7210
_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7211
} else {
7212
_bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7213
assert(_bitMap->isMarked(addr+size-1),
7214
"inconsistent Printezis mark");
7215
}
7216
#endif // ASSERT
7217
} else {
7218
// an uninitialized object
7219
assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7220
HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7221
size = pointer_delta(nextOneAddr + 1, addr);
7222
assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7223
"alignment problem");
7224
// Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7225
// will dirty the card when the klass pointer is installed in the
7226
// object (signalling the completion of initialization).
7227
}
7228
} else {
7229
// Either a not yet marked object or an uninitialized object
7230
if (p->klass_or_null_acquire() == NULL) {
7231
// An uninitialized object, skip to the next card, since
7232
// we may not be able to read its P-bits yet.
7233
assert(size == 0, "Initial value");
7234
} else {
7235
// An object not (yet) reached by marking: we merely need to
7236
// compute its size so as to go look at the next block.
7237
assert(p->is_oop(true), "should be an oop");
7238
size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7239
}
7240
}
7241
DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7242
return size;
7243
}
7244
7245
void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7246
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7247
"CMS thread should hold CMS token");
7248
assert_lock_strong(_freelistLock);
7249
assert_lock_strong(_bitMap->lock());
7250
// relinquish the free_list_lock and bitMaplock()
7251
_bitMap->lock()->unlock();
7252
_freelistLock->unlock();
7253
ConcurrentMarkSweepThread::desynchronize(true);
7254
ConcurrentMarkSweepThread::acknowledge_yield_request();
7255
_collector->stopTimer();
7256
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7257
if (PrintCMSStatistics != 0) {
7258
_collector->incrementYields();
7259
}
7260
_collector->icms_wait();
7261
7262
// See the comment in coordinator_yield()
7263
for (unsigned i = 0; i < CMSYieldSleepCount &&
7264
ConcurrentMarkSweepThread::should_yield() &&
7265
!CMSCollector::foregroundGCIsActive(); ++i) {
7266
os::sleep(Thread::current(), 1, false);
7267
ConcurrentMarkSweepThread::acknowledge_yield_request();
7268
}
7269
7270
ConcurrentMarkSweepThread::synchronize(true);
7271
_freelistLock->lock_without_safepoint_check();
7272
_bitMap->lock()->lock_without_safepoint_check();
7273
_collector->startTimer();
7274
}
7275
7276
7277
//////////////////////////////////////////////////////////////////
7278
// SurvivorSpacePrecleanClosure
7279
//////////////////////////////////////////////////////////////////
7280
// This (single-threaded) closure is used to preclean the oops in
7281
// the survivor spaces.
7282
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7283
7284
HeapWord* addr = (HeapWord*)p;
7285
DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7286
assert(!_span.contains(addr), "we are scanning the survivor spaces");
7287
assert(p->klass_or_null() != NULL, "object should be initialized");
7288
// an initialized object; ignore mark word in verification below
7289
// since we are running concurrent with mutators
7290
assert(p->is_oop(true), "should be an oop");
7291
// Note that we do not yield while we iterate over
7292
// the interior oops of p, pushing the relevant ones
7293
// on our marking stack.
7294
size_t size = p->oop_iterate(_scanning_closure);
7295
do_yield_check();
7296
// Observe that below, we do not abandon the preclean
7297
// phase as soon as we should; rather we empty the
7298
// marking stack before returning. This is to satisfy
7299
// some existing assertions. In general, it may be a
7300
// good idea to abort immediately and complete the marking
7301
// from the grey objects at a later time.
7302
while (!_mark_stack->isEmpty()) {
7303
oop new_oop = _mark_stack->pop();
7304
assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7305
assert(_bit_map->isMarked((HeapWord*)new_oop),
7306
"only grey objects on this stack");
7307
// iterate over the oops in this oop, marking and pushing
7308
// the ones in CMS heap (i.e. in _span).
7309
new_oop->oop_iterate(_scanning_closure);
7310
// check if it's time to yield
7311
do_yield_check();
7312
}
7313
unsigned int after_count =
7314
GenCollectedHeap::heap()->total_collections();
7315
bool abort = (_before_count != after_count) ||
7316
_collector->should_abort_preclean();
7317
return abort ? 0 : size;
7318
}
7319
7320
void SurvivorSpacePrecleanClosure::do_yield_work() {
7321
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7322
"CMS thread should hold CMS token");
7323
assert_lock_strong(_bit_map->lock());
7324
// Relinquish the bit map lock
7325
_bit_map->lock()->unlock();
7326
ConcurrentMarkSweepThread::desynchronize(true);
7327
ConcurrentMarkSweepThread::acknowledge_yield_request();
7328
_collector->stopTimer();
7329
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7330
if (PrintCMSStatistics != 0) {
7331
_collector->incrementYields();
7332
}
7333
_collector->icms_wait();
7334
7335
// See the comment in coordinator_yield()
7336
for (unsigned i = 0; i < CMSYieldSleepCount &&
7337
ConcurrentMarkSweepThread::should_yield() &&
7338
!CMSCollector::foregroundGCIsActive(); ++i) {
7339
os::sleep(Thread::current(), 1, false);
7340
ConcurrentMarkSweepThread::acknowledge_yield_request();
7341
}
7342
7343
ConcurrentMarkSweepThread::synchronize(true);
7344
_bit_map->lock()->lock_without_safepoint_check();
7345
_collector->startTimer();
7346
}
7347
7348
// This closure is used to rescan the marked objects on the dirty cards
7349
// in the mod union table and the card table proper. In the parallel
7350
// case, although the bitMap is shared, we do a single read so the
7351
// isMarked() query is "safe".
7352
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7353
// Ignore mark word because we are running concurrent with mutators
7354
assert(p->is_oop_or_null(true), "expected an oop or null");
7355
HeapWord* addr = (HeapWord*)p;
7356
assert(_span.contains(addr), "we are scanning the CMS generation");
7357
bool is_obj_array = false;
7358
#ifdef ASSERT
7359
if (!_parallel) {
7360
assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7361
assert(_collector->overflow_list_is_empty(),
7362
"overflow list should be empty");
7363
7364
}
7365
#endif // ASSERT
7366
if (_bit_map->isMarked(addr)) {
7367
// Obj arrays are precisely marked, non-arrays are not;
7368
// so we scan objArrays precisely and non-arrays in their
7369
// entirety.
7370
if (p->is_objArray()) {
7371
is_obj_array = true;
7372
if (_parallel) {
7373
p->oop_iterate(_par_scan_closure, mr);
7374
} else {
7375
p->oop_iterate(_scan_closure, mr);
7376
}
7377
} else {
7378
if (_parallel) {
7379
p->oop_iterate(_par_scan_closure);
7380
} else {
7381
p->oop_iterate(_scan_closure);
7382
}
7383
}
7384
}
7385
#ifdef ASSERT
7386
if (!_parallel) {
7387
assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7388
assert(_collector->overflow_list_is_empty(),
7389
"overflow list should be empty");
7390
7391
}
7392
#endif // ASSERT
7393
return is_obj_array;
7394
}
7395
7396
MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7397
MemRegion span,
7398
CMSBitMap* bitMap, CMSMarkStack* markStack,
7399
bool should_yield, bool verifying):
7400
_collector(collector),
7401
_span(span),
7402
_bitMap(bitMap),
7403
_mut(&collector->_modUnionTable),
7404
_markStack(markStack),
7405
_yield(should_yield),
7406
_skipBits(0)
7407
{
7408
assert(_markStack->isEmpty(), "stack should be empty");
7409
_finger = _bitMap->startWord();
7410
_threshold = _finger;
7411
assert(_collector->_restart_addr == NULL, "Sanity check");
7412
assert(_span.contains(_finger), "Out of bounds _finger?");
7413
DEBUG_ONLY(_verifying = verifying;)
7414
}
7415
7416
void MarkFromRootsClosure::reset(HeapWord* addr) {
7417
assert(_markStack->isEmpty(), "would cause duplicates on stack");
7418
assert(_span.contains(addr), "Out of bounds _finger?");
7419
_finger = addr;
7420
_threshold = (HeapWord*)round_to(
7421
(intptr_t)_finger, CardTableModRefBS::card_size);
7422
}
7423
7424
// Should revisit to see if this should be restructured for
7425
// greater efficiency.
7426
bool MarkFromRootsClosure::do_bit(size_t offset) {
7427
if (_skipBits > 0) {
7428
_skipBits--;
7429
return true;
7430
}
7431
// convert offset into a HeapWord*
7432
HeapWord* addr = _bitMap->startWord() + offset;
7433
assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7434
"address out of range");
7435
assert(_bitMap->isMarked(addr), "tautology");
7436
if (_bitMap->isMarked(addr+1)) {
7437
// this is an allocated but not yet initialized object
7438
assert(_skipBits == 0, "tautology");
7439
_skipBits = 2; // skip next two marked bits ("Printezis-marks")
7440
oop p = oop(addr);
7441
if (p->klass_or_null_acquire() == NULL) {
7442
DEBUG_ONLY(if (!_verifying) {)
7443
// We re-dirty the cards on which this object lies and increase
7444
// the _threshold so that we'll come back to scan this object
7445
// during the preclean or remark phase. (CMSCleanOnEnter)
7446
if (CMSCleanOnEnter) {
7447
size_t sz = _collector->block_size_using_printezis_bits(addr);
7448
HeapWord* end_card_addr = (HeapWord*)round_to(
7449
(intptr_t)(addr+sz), CardTableModRefBS::card_size);
7450
MemRegion redirty_range = MemRegion(addr, end_card_addr);
7451
assert(!redirty_range.is_empty(), "Arithmetical tautology");
7452
// Bump _threshold to end_card_addr; note that
7453
// _threshold cannot possibly exceed end_card_addr, anyhow.
7454
// This prevents future clearing of the card as the scan proceeds
7455
// to the right.
7456
assert(_threshold <= end_card_addr,
7457
"Because we are just scanning into this object");
7458
if (_threshold < end_card_addr) {
7459
_threshold = end_card_addr;
7460
}
7461
if (p->klass_or_null_acquire() != NULL) {
7462
// Redirty the range of cards...
7463
_mut->mark_range(redirty_range);
7464
} // ...else the setting of klass will dirty the card anyway.
7465
}
7466
DEBUG_ONLY(})
7467
return true;
7468
}
7469
}
7470
scanOopsInOop(addr);
7471
return true;
7472
}
7473
7474
// We take a break if we've been at this for a while,
7475
// so as to avoid monopolizing the locks involved.
7476
void MarkFromRootsClosure::do_yield_work() {
7477
// First give up the locks, then yield, then re-lock
7478
// We should probably use a constructor/destructor idiom to
7479
// do this unlock/lock or modify the MutexUnlocker class to
7480
// serve our purpose. XXX
7481
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7482
"CMS thread should hold CMS token");
7483
assert_lock_strong(_bitMap->lock());
7484
_bitMap->lock()->unlock();
7485
ConcurrentMarkSweepThread::desynchronize(true);
7486
ConcurrentMarkSweepThread::acknowledge_yield_request();
7487
_collector->stopTimer();
7488
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7489
if (PrintCMSStatistics != 0) {
7490
_collector->incrementYields();
7491
}
7492
_collector->icms_wait();
7493
7494
// See the comment in coordinator_yield()
7495
for (unsigned i = 0; i < CMSYieldSleepCount &&
7496
ConcurrentMarkSweepThread::should_yield() &&
7497
!CMSCollector::foregroundGCIsActive(); ++i) {
7498
os::sleep(Thread::current(), 1, false);
7499
ConcurrentMarkSweepThread::acknowledge_yield_request();
7500
}
7501
7502
ConcurrentMarkSweepThread::synchronize(true);
7503
_bitMap->lock()->lock_without_safepoint_check();
7504
_collector->startTimer();
7505
}
7506
7507
void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7508
assert(_bitMap->isMarked(ptr), "expected bit to be set");
7509
assert(_markStack->isEmpty(),
7510
"should drain stack to limit stack usage");
7511
// convert ptr to an oop preparatory to scanning
7512
oop obj = oop(ptr);
7513
// Ignore mark word in verification below, since we
7514
// may be running concurrent with mutators.
7515
assert(obj->is_oop(true), "should be an oop");
7516
assert(_finger <= ptr, "_finger runneth ahead");
7517
// advance the finger to right end of this object
7518
_finger = ptr + obj->size();
7519
assert(_finger > ptr, "we just incremented it above");
7520
// On large heaps, it may take us some time to get through
7521
// the marking phase (especially if running iCMS). During
7522
// this time it's possible that a lot of mutations have
7523
// accumulated in the card table and the mod union table --
7524
// these mutation records are redundant until we have
7525
// actually traced into the corresponding card.
7526
// Here, we check whether advancing the finger would make
7527
// us cross into a new card, and if so clear corresponding
7528
// cards in the MUT (preclean them in the card-table in the
7529
// future).
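// Illustrative sketch of the card arithmetic below (the 512-byte card,
// i.e. 64 HeapWords on a 64-bit VM, is an assumed figure): with
// CMSCleanOnEnter enabled, if _threshold sits at a card-aligned word W and
// scanning this object advances _finger to W + 100, the new _threshold
// becomes W + 128 and the MUT cards covering [W, W + 128) are cleared,
// since marking is tracing through that range directly and older mutation
// records for those cards are therefore redundant.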
7530
7531
DEBUG_ONLY(if (!_verifying) {)
7532
// The clean-on-enter optimization is disabled by default,
7533
// until we fix 6178663.
7534
if (CMSCleanOnEnter && (_finger > _threshold)) {
7535
// [_threshold, _finger) represents the interval
7536
// of cards to be cleared in MUT (or precleaned in card table).
7537
// The set of cards to be cleared is all those that overlap
7538
// with the interval [_threshold, _finger); note that
7539
// _threshold is always kept card-aligned but _finger isn't
7540
// always card-aligned.
7541
HeapWord* old_threshold = _threshold;
7542
assert(old_threshold == (HeapWord*)round_to(
7543
(intptr_t)old_threshold, CardTableModRefBS::card_size),
7544
"_threshold should always be card-aligned");
7545
_threshold = (HeapWord*)round_to(
7546
(intptr_t)_finger, CardTableModRefBS::card_size);
7547
MemRegion mr(old_threshold, _threshold);
7548
assert(!mr.is_empty(), "Control point invariant");
7549
assert(_span.contains(mr), "Should clear within span");
7550
_mut->clear_range(mr);
7551
}
7552
DEBUG_ONLY(})
7553
// Note: the finger doesn't advance while we drain
7554
// the stack below.
7555
PushOrMarkClosure pushOrMarkClosure(_collector,
7556
_span, _bitMap, _markStack,
7557
_finger, this);
7558
bool res = _markStack->push(obj);
7559
assert(res, "Empty non-zero size stack should have space for single push");
7560
while (!_markStack->isEmpty()) {
7561
oop new_oop = _markStack->pop();
7562
// Skip verifying header mark word below because we are
7563
// running concurrent with mutators.
7564
assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7565
// now scan this oop's oops
7566
new_oop->oop_iterate(&pushOrMarkClosure);
7567
do_yield_check();
7568
}
7569
assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7570
}
7571
7572
Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7573
CMSCollector* collector, MemRegion span,
7574
CMSBitMap* bit_map,
7575
OopTaskQueue* work_queue,
7576
CMSMarkStack* overflow_stack,
7577
bool should_yield):
7578
_collector(collector),
7579
_whole_span(collector->_span),
7580
_span(span),
7581
_bit_map(bit_map),
7582
_mut(&collector->_modUnionTable),
7583
_work_queue(work_queue),
7584
_overflow_stack(overflow_stack),
7585
_yield(should_yield),
7586
_skip_bits(0),
7587
_task(task)
7588
{
7589
assert(_work_queue->size() == 0, "work_queue should be empty");
7590
_finger = span.start();
7591
_threshold = _finger; // XXX Defer clear-on-enter optimization for now
7592
assert(_span.contains(_finger), "Out of bounds _finger?");
7593
}
7594
7595
// Should revisit to see if this should be restructured for
7596
// greater efficiency.
7597
bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7598
if (_skip_bits > 0) {
7599
_skip_bits--;
7600
return true;
7601
}
7602
// convert offset into a HeapWord*
7603
HeapWord* addr = _bit_map->startWord() + offset;
7604
assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7605
"address out of range");
7606
assert(_bit_map->isMarked(addr), "tautology");
7607
if (_bit_map->isMarked(addr+1)) {
7608
// this is an allocated object that might not yet be initialized
7609
assert(_skip_bits == 0, "tautology");
7610
_skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7611
oop p = oop(addr);
7612
if (p->klass_or_null_acquire() == NULL) {
7613
// in the case of Clean-on-Enter optimization, redirty card
7614
// and avoid clearing card by increasing the threshold.
7615
return true;
7616
}
7617
}
7618
scan_oops_in_oop(addr);
7619
return true;
7620
}
7621
7622
void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7623
assert(_bit_map->isMarked(ptr), "expected bit to be set");
7624
// Should we assert that our work queue is empty or
7625
// below some drain limit?
7626
assert(_work_queue->size() == 0,
7627
"should drain stack to limit stack usage");
7628
// convert ptr to an oop preparatory to scanning
7629
oop obj = oop(ptr);
7630
// Ignore mark word in verification below, since we
7631
// may be running concurrent with mutators.
7632
assert(obj->is_oop(true), "should be an oop");
7633
assert(_finger <= ptr, "_finger runneth ahead");
7634
// advance the finger to right end of this object
7635
_finger = ptr + obj->size();
7636
assert(_finger > ptr, "we just incremented it above");
7637
// On large heaps, it may take us some time to get through
7638
// the marking phase (especially if running iCMS). During
7639
// this time it's possible that a lot of mutations have
7640
// accumulated in the card table and the mod union table --
7641
// these mutation records are redundant until we have
7642
// actually traced into the corresponding card.
7643
// Here, we check whether advancing the finger would make
7644
// us cross into a new card, and if so clear corresponding
7645
// cards in the MUT (preclean them in the card-table in the
7646
// future).
7647
7648
// The clean-on-enter optimization is disabled by default,
7649
// until we fix 6178663.
7650
if (CMSCleanOnEnter && (_finger > _threshold)) {
7651
// [_threshold, _finger) represents the interval
7652
// of cards to be cleared in MUT (or precleaned in card table).
7653
// The set of cards to be cleared is all those that overlap
7654
// with the interval [_threshold, _finger); note that
7655
// _threshold is always kept card-aligned but _finger isn't
7656
// always card-aligned.
7657
HeapWord* old_threshold = _threshold;
7658
assert(old_threshold == (HeapWord*)round_to(
7659
(intptr_t)old_threshold, CardTableModRefBS::card_size),
7660
"_threshold should always be card-aligned");
7661
_threshold = (HeapWord*)round_to(
7662
(intptr_t)_finger, CardTableModRefBS::card_size);
7663
MemRegion mr(old_threshold, _threshold);
7664
assert(!mr.is_empty(), "Control point invariant");
7665
assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7666
_mut->clear_range(mr);
7667
}
7668
7669
// Note: the local finger doesn't advance while we drain
7670
// the stack below, but the global finger sure can and will.
7671
HeapWord** gfa = _task->global_finger_addr();
7672
Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7673
_span, _bit_map,
7674
_work_queue,
7675
_overflow_stack,
7676
_finger,
7677
gfa, this);
7678
bool res = _work_queue->push(obj); // overflow could occur here
7679
assert(res, "Will hold once we use workqueues");
7680
while (true) {
7681
oop new_oop;
7682
if (!_work_queue->pop_local(new_oop)) {
7683
// We emptied our work_queue; check if there's stuff that can
7684
// be gotten from the overflow stack.
7685
if (CMSConcMarkingTask::get_work_from_overflow_stack(
7686
_overflow_stack, _work_queue)) {
7687
do_yield_check();
7688
continue;
7689
} else { // done
7690
break;
7691
}
7692
}
7693
// Skip verifying header mark word below because we are
7694
// running concurrent with mutators.
7695
assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7696
// now scan this oop's oops
7697
new_oop->oop_iterate(&pushOrMarkClosure);
7698
do_yield_check();
7699
}
7700
assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7701
}
7702
7703
// Yield in response to a request from VM Thread or
7704
// from mutators.
7705
void Par_MarkFromRootsClosure::do_yield_work() {
7706
assert(_task != NULL, "sanity");
7707
_task->yield();
7708
}
7709
7710
// A variant of the above used for verifying CMS marking work.
7711
MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7712
MemRegion span,
7713
CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7714
CMSMarkStack* mark_stack):
7715
_collector(collector),
7716
_span(span),
7717
_verification_bm(verification_bm),
7718
_cms_bm(cms_bm),
7719
_mark_stack(mark_stack),
7720
_pam_verify_closure(collector, span, verification_bm, cms_bm,
7721
mark_stack)
7722
{
7723
assert(_mark_stack->isEmpty(), "stack should be empty");
7724
_finger = _verification_bm->startWord();
7725
assert(_collector->_restart_addr == NULL, "Sanity check");
7726
assert(_span.contains(_finger), "Out of bounds _finger?");
7727
}
7728
7729
void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7730
assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7731
assert(_span.contains(addr), "Out of bounds _finger?");
7732
_finger = addr;
7733
}
7734
7735
// Should revisit to see if this should be restructured for
7736
// greater efficiency.
7737
bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7738
// convert offset into a HeapWord*
7739
HeapWord* addr = _verification_bm->startWord() + offset;
7740
assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7741
"address out of range");
7742
assert(_verification_bm->isMarked(addr), "tautology");
7743
assert(_cms_bm->isMarked(addr), "tautology");
7744
7745
assert(_mark_stack->isEmpty(),
7746
"should drain stack to limit stack usage");
7747
// convert addr to an oop preparatory to scanning
7748
oop obj = oop(addr);
7749
assert(obj->is_oop(), "should be an oop");
7750
assert(_finger <= addr, "_finger runneth ahead");
7751
// advance the finger to right end of this object
7752
_finger = addr + obj->size();
7753
assert(_finger > addr, "we just incremented it above");
7754
// Note: the finger doesn't advance while we drain
7755
// the stack below.
7756
bool res = _mark_stack->push(obj);
7757
assert(res, "Empty non-zero size stack should have space for single push");
7758
while (!_mark_stack->isEmpty()) {
7759
oop new_oop = _mark_stack->pop();
7760
assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7761
// now scan this oop's oops
7762
new_oop->oop_iterate(&_pam_verify_closure);
7763
}
7764
assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7765
return true;
7766
}
7767
7768
PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7769
CMSCollector* collector, MemRegion span,
7770
CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7771
CMSMarkStack* mark_stack):
7772
MetadataAwareOopClosure(collector->ref_processor()),
7773
_collector(collector),
7774
_span(span),
7775
_verification_bm(verification_bm),
7776
_cms_bm(cms_bm),
7777
_mark_stack(mark_stack)
7778
{ }
7779
7780
void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7781
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7782
7783
// Upon stack overflow, we discard (part of) the stack,
7784
// remembering the least address amongst those discarded
7785
// in CMSCollector's _restart_address.
7786
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7787
// Remember the least grey address discarded
7788
HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7789
_collector->lower_restart_addr(ra);
7790
_mark_stack->reset(); // discard stack contents
7791
_mark_stack->expand(); // expand the stack if possible
7792
}
7793
7794
void PushAndMarkVerifyClosure::do_oop(oop obj) {
7795
assert(obj->is_oop_or_null(), "expected an oop or NULL");
7796
HeapWord* addr = (HeapWord*)obj;
7797
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7798
// Oop lies in _span and isn't yet grey or black
7799
_verification_bm->mark(addr); // now grey
7800
if (!_cms_bm->isMarked(addr)) {
7801
oop(addr)->print();
7802
gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7803
addr);
7804
fatal("... aborting");
7805
}
7806
7807
if (!_mark_stack->push(obj)) { // stack overflow
7808
if (PrintCMSStatistics != 0) {
7809
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7810
SIZE_FORMAT, _mark_stack->capacity());
7811
}
7812
assert(_mark_stack->isFull(), "Else push should have succeeded");
7813
handle_stack_overflow(addr);
7814
}
7815
// anything including and to the right of _finger
7816
// will be scanned as we iterate over the remainder of the
7817
// bit map
7818
}
7819
}
7820
7821
PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7822
MemRegion span,
7823
CMSBitMap* bitMap, CMSMarkStack* markStack,
7824
HeapWord* finger, MarkFromRootsClosure* parent) :
7825
MetadataAwareOopClosure(collector->ref_processor()),
7826
_collector(collector),
7827
_span(span),
7828
_bitMap(bitMap),
7829
_markStack(markStack),
7830
_finger(finger),
7831
_parent(parent)
7832
{ }
7833
7834
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7835
MemRegion span,
7836
CMSBitMap* bit_map,
7837
OopTaskQueue* work_queue,
7838
CMSMarkStack* overflow_stack,
7839
HeapWord* finger,
7840
HeapWord** global_finger_addr,
7841
Par_MarkFromRootsClosure* parent) :
7842
MetadataAwareOopClosure(collector->ref_processor()),
7843
_collector(collector),
7844
_whole_span(collector->_span),
7845
_span(span),
7846
_bit_map(bit_map),
7847
_work_queue(work_queue),
7848
_overflow_stack(overflow_stack),
7849
_finger(finger),
7850
_global_finger_addr(global_finger_addr),
7851
_parent(parent)
7852
{ }
7853
7854
// Assumes thread-safe access by callers, who are
7855
// responsible for mutual exclusion.
7856
void CMSCollector::lower_restart_addr(HeapWord* low) {
7857
assert(_span.contains(low), "Out of bounds addr");
7858
if (_restart_addr == NULL) {
7859
_restart_addr = low;
7860
} else {
7861
_restart_addr = MIN2(_restart_addr, low);
7862
}
7863
}
7864
7865
// Upon stack overflow, we discard (part of) the stack,
7866
// remembering the least address amongst those discarded
7867
// in CMSCollector's _restart_address.
7868
void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7869
// Remember the least grey address discarded
7870
HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7871
_collector->lower_restart_addr(ra);
7872
_markStack->reset(); // discard stack contents
7873
_markStack->expand(); // expand the stack if possible
7874
}
7875
7876
// Upon stack overflow, we discard (part of) the stack,
7877
// remembering the least address amongst those discarded
7878
// in CMSCollector's _restart_address.
7879
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7880
// We need to do this under a mutex to prevent other
7881
// workers from interfering with the work done below.
7882
MutexLockerEx ml(_overflow_stack->par_lock(),
7883
Mutex::_no_safepoint_check_flag);
7884
// Remember the least grey address discarded
7885
HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7886
_collector->lower_restart_addr(ra);
7887
_overflow_stack->reset(); // discard stack contents
7888
_overflow_stack->expand(); // expand the stack if possible
7889
}
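// Editorial note (inferred from the surrounding code rather than stated
// here): discarding the stack contents is safe because the least discarded
// address has been recorded via lower_restart_addr(); when the marking
// pass later observes a non-NULL _restart_addr it resumes the bit-map scan
// from that address, so objects whose grey entries were thrown away are
// rediscovered from their marks in the bit map.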
7890
7891
void PushOrMarkClosure::do_oop(oop obj) {
7892
// Ignore mark word because we are running concurrent with mutators.
7893
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7894
HeapWord* addr = (HeapWord*)obj;
7895
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7896
// Oop lies in _span and isn't yet grey or black
7897
_bitMap->mark(addr); // now grey
7898
if (addr < _finger) {
7899
// the bit map iteration has already either passed, or
7900
// sampled, this bit in the bit map; we'll need to
7901
// use the marking stack to scan this oop's oops.
7902
bool simulate_overflow = false;
7903
NOT_PRODUCT(
7904
if (CMSMarkStackOverflowALot &&
7905
_collector->simulate_overflow()) {
7906
// simulate a stack overflow
7907
simulate_overflow = true;
7908
}
7909
)
7910
if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7911
if (PrintCMSStatistics != 0) {
7912
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7913
SIZE_FORMAT, _markStack->capacity());
7914
}
7915
assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7916
handle_stack_overflow(addr);
7917
}
7918
}
7919
// anything including and to the right of _finger
7920
// will be scanned as we iterate over the remainder of the
7921
// bit map
7922
do_yield_check();
7923
}
7924
}
7925
7926
void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7927
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7928
7929
void Par_PushOrMarkClosure::do_oop(oop obj) {
7930
// Ignore mark word because we are running concurrent with mutators.
7931
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7932
HeapWord* addr = (HeapWord*)obj;
7933
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7934
// Oop lies in _whole_span and isn't yet grey or black
7935
// We read the global_finger (volatile read) strictly after marking oop
7936
bool res = _bit_map->par_mark(addr); // now grey
7937
volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7938
// Should we push this marked oop on our stack?
7939
// -- if someone else marked it, nothing to do
7940
// -- if target oop is above global finger nothing to do
7941
// -- if target oop is in chunk and above local finger
7942
// then nothing to do
7943
// -- else push on work queue
7944
if ( !res // someone else marked it, they will deal with it
7945
|| (addr >= *gfa) // will be scanned in a later task
7946
|| (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7947
return;
7948
}
7949
// the bit map iteration has already either passed, or
7950
// sampled, this bit in the bit map; we'll need to
7951
// use the marking stack to scan this oop's oops.
7952
bool simulate_overflow = false;
7953
NOT_PRODUCT(
7954
if (CMSMarkStackOverflowALot &&
7955
_collector->simulate_overflow()) {
7956
// simulate a stack overflow
7957
simulate_overflow = true;
7958
}
7959
)
7960
if (simulate_overflow ||
7961
!(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7962
// stack overflow
7963
if (PrintCMSStatistics != 0) {
7964
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7965
SIZE_FORMAT, _overflow_stack->capacity());
7966
}
7967
// We cannot assert that the overflow stack is full because
7968
// it may have been emptied since.
7969
assert(simulate_overflow ||
7970
_work_queue->size() == _work_queue->max_elems(),
7971
"Else push should have succeeded");
7972
handle_stack_overflow(addr);
7973
}
7974
do_yield_check();
7975
}
7976
}
7977
7978
void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7979
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7980
7981
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7982
MemRegion span,
7983
ReferenceProcessor* rp,
7984
CMSBitMap* bit_map,
7985
CMSBitMap* mod_union_table,
7986
CMSMarkStack* mark_stack,
7987
bool concurrent_precleaning):
7988
MetadataAwareOopClosure(rp),
7989
_collector(collector),
7990
_span(span),
7991
_bit_map(bit_map),
7992
_mod_union_table(mod_union_table),
7993
_mark_stack(mark_stack),
7994
_concurrent_precleaning(concurrent_precleaning)
7995
{
7996
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7997
}
7998
7999
// Grey object rescan during pre-cleaning and second checkpoint phases --
8000
// the non-parallel version (the parallel version appears further below.)
8001
void PushAndMarkClosure::do_oop(oop obj) {
8002
// Ignore mark word verification. If during concurrent precleaning,
8003
// the object monitor may be locked. If during the checkpoint
8004
// phases, the object may already have been reached by a different
8005
// path and may be at the end of the global overflow list (so
8006
// the mark word may be NULL).
8007
assert(obj->is_oop_or_null(true /* ignore mark word */),
8008
"expected an oop or NULL");
8009
HeapWord* addr = (HeapWord*)obj;
8010
// Check if oop points into the CMS generation
8011
// and is not marked
8012
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8013
// a white object ...
8014
_bit_map->mark(addr); // ... now grey
8015
// push on the marking stack (grey set)
8016
bool simulate_overflow = false;
8017
NOT_PRODUCT(
8018
if (CMSMarkStackOverflowALot &&
8019
_collector->simulate_overflow()) {
8020
// simulate a stack overflow
8021
simulate_overflow = true;
8022
}
8023
)
8024
if (simulate_overflow || !_mark_stack->push(obj)) {
8025
if (_concurrent_precleaning) {
8026
// During precleaning we can just dirty the appropriate card(s)
8027
// in the mod union table, thus ensuring that the object remains
8028
// in the grey set and continue. In the case of object arrays
8029
// we need to dirty all of the cards that the object spans,
8030
// since the rescan of object arrays will be limited to the
8031
// dirty cards.
8032
// Note that no one can be interfering with us in this action
8033
// of dirtying the mod union table, so no locking or atomics
8034
// are required.
8035
if (obj->is_objArray()) {
8036
size_t sz = obj->size();
8037
HeapWord* end_card_addr = (HeapWord*)round_to(
8038
(intptr_t)(addr+sz), CardTableModRefBS::card_size);
8039
MemRegion redirty_range = MemRegion(addr, end_card_addr);
8040
assert(!redirty_range.is_empty(), "Arithmetical tautology");
8041
_mod_union_table->mark_range(redirty_range);
8042
} else {
8043
_mod_union_table->mark(addr);
8044
}
8045
_collector->_ser_pmc_preclean_ovflw++;
8046
} else {
8047
// During the remark phase, we need to remember this oop
8048
// in the overflow list.
8049
_collector->push_on_overflow_list(obj);
8050
_collector->_ser_pmc_remark_ovflw++;
8051
}
8052
}
8053
}
8054
}
8055
8056
Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8057
MemRegion span,
8058
ReferenceProcessor* rp,
8059
CMSBitMap* bit_map,
8060
OopTaskQueue* work_queue):
8061
MetadataAwareOopClosure(rp),
8062
_collector(collector),
8063
_span(span),
8064
_bit_map(bit_map),
8065
_work_queue(work_queue)
8066
{
8067
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8068
}
8069
8070
void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
8071
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8072
8073
// Grey object rescan during second checkpoint phase --
8074
// the parallel version.
8075
void Par_PushAndMarkClosure::do_oop(oop obj) {
8076
// In the assert below, we ignore the mark word because
8077
// this oop may point to an already visited object that is
8078
// on the overflow stack (in which case the mark word has
8079
// been hijacked for chaining into the overflow stack --
8080
// if this is the last object in the overflow stack then
8081
// its mark word will be NULL). Because this object may
8082
// have been subsequently popped off the global overflow
8083
// stack, and the mark word possibly restored to the prototypical
8084
// value, by the time we get to examine this failing assert in
8085
// the debugger, is_oop_or_null(false) may subsequently start
8086
// to hold.
8087
assert(obj->is_oop_or_null(true),
8088
"expected an oop or NULL");
8089
HeapWord* addr = (HeapWord*)obj;
8090
// Check if oop points into the CMS generation
8091
// and is not marked
8092
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8093
// a white object ...
8094
// If we manage to "claim" the object, by being the
8095
// first thread to mark it, then we push it on our
8096
// marking stack
8097
if (_bit_map->par_mark(addr)) { // ... now grey
8098
// push on work queue (grey set)
8099
bool simulate_overflow = false;
8100
NOT_PRODUCT(
8101
if (CMSMarkStackOverflowALot &&
8102
_collector->par_simulate_overflow()) {
8103
// simulate a stack overflow
8104
simulate_overflow = true;
8105
}
8106
)
8107
if (simulate_overflow || !_work_queue->push(obj)) {
8108
_collector->par_push_on_overflow_list(obj);
8109
_collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
8110
}
8111
} // Else, some other thread got there first
8112
}
8113
}
8114
8115
void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8116
void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8117
8118
void CMSPrecleanRefsYieldClosure::do_yield_work() {
8119
Mutex* bml = _collector->bitMapLock();
8120
assert_lock_strong(bml);
8121
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8122
"CMS thread should hold CMS token");
8123
8124
bml->unlock();
8125
ConcurrentMarkSweepThread::desynchronize(true);
8126
8127
ConcurrentMarkSweepThread::acknowledge_yield_request();
8128
8129
_collector->stopTimer();
8130
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8131
if (PrintCMSStatistics != 0) {
8132
_collector->incrementYields();
8133
}
8134
_collector->icms_wait();
8135
8136
// See the comment in coordinator_yield()
8137
for (unsigned i = 0; i < CMSYieldSleepCount &&
8138
ConcurrentMarkSweepThread::should_yield() &&
8139
!CMSCollector::foregroundGCIsActive(); ++i) {
8140
os::sleep(Thread::current(), 1, false);
8141
ConcurrentMarkSweepThread::acknowledge_yield_request();
8142
}
8143
8144
ConcurrentMarkSweepThread::synchronize(true);
8145
bml->lock();
8146
8147
_collector->startTimer();
8148
}
8149
8150
bool CMSPrecleanRefsYieldClosure::should_return() {
8151
if (ConcurrentMarkSweepThread::should_yield()) {
8152
do_yield_work();
8153
}
8154
return _collector->foregroundGCIsActive();
8155
}
8156
8157
void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8158
assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8159
"mr should be aligned to start at a card boundary");
8160
// We'd like to assert:
8161
// assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8162
// "mr should be a range of cards");
8163
// However, that would be too strong in one case -- the last
8164
// partition ends at _unallocated_block which, in general, can be
8165
// an arbitrary boundary, not necessarily card aligned.
8166
if (PrintCMSStatistics != 0) {
8167
_num_dirty_cards +=
8168
mr.word_size()/CardTableModRefBS::card_size_in_words;
8169
}
8170
_space->object_iterate_mem(mr, &_scan_cl);
8171
}
8172
8173
SweepClosure::SweepClosure(CMSCollector* collector,
8174
ConcurrentMarkSweepGeneration* g,
8175
CMSBitMap* bitMap, bool should_yield) :
8176
_collector(collector),
8177
_g(g),
8178
_sp(g->cmsSpace()),
8179
_limit(_sp->sweep_limit()),
8180
_freelistLock(_sp->freelistLock()),
8181
_bitMap(bitMap),
8182
_yield(should_yield),
8183
_inFreeRange(false), // No free range at beginning of sweep
8184
_freeRangeInFreeLists(false), // No free range at beginning of sweep
8185
_lastFreeRangeCoalesced(false),
8186
_freeFinger(g->used_region().start())
8187
{
8188
NOT_PRODUCT(
8189
_numObjectsFreed = 0;
8190
_numWordsFreed = 0;
8191
_numObjectsLive = 0;
8192
_numWordsLive = 0;
8193
_numObjectsAlreadyFree = 0;
8194
_numWordsAlreadyFree = 0;
8195
_last_fc = NULL;
8196
8197
_sp->initializeIndexedFreeListArrayReturnedBytes();
8198
_sp->dictionary()->initialize_dict_returned_bytes();
8199
)
8200
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8201
"sweep _limit out of bounds");
8202
if (CMSTraceSweeper) {
8203
gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8204
_limit);
8205
}
8206
}
8207
8208
void SweepClosure::print_on(outputStream* st) const {
8209
tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8210
_sp->bottom(), _sp->end());
8211
tty->print_cr("_limit = " PTR_FORMAT, _limit);
8212
tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8213
NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8214
tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8215
_inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8216
}
8217
8218
#ifndef PRODUCT
8219
// Assertion checking only: no useful work in product mode --
8220
// however, if any of the flags below become product flags,
8221
// you may need to review this code to see if it needs to be
8222
// enabled in product mode.
8223
SweepClosure::~SweepClosure() {
8224
assert_lock_strong(_freelistLock);
8225
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8226
"sweep _limit out of bounds");
8227
if (inFreeRange()) {
8228
warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8229
print();
8230
ShouldNotReachHere();
8231
}
8232
if (Verbose && PrintGC) {
8233
gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
8234
_numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8235
gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects, "
8236
SIZE_FORMAT " bytes "
8237
"Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
8238
_numObjectsLive, _numWordsLive*sizeof(HeapWord),
8239
_numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8240
size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8241
* sizeof(HeapWord);
8242
gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
8243
8244
if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8245
size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8246
size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8247
size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8248
gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
8249
gclog_or_tty->print(" Indexed List Returned " SIZE_FORMAT " bytes",
8250
indexListReturnedBytes);
8251
gclog_or_tty->print_cr(" Dictionary Returned " SIZE_FORMAT " bytes",
8252
dict_returned_bytes);
8253
}
8254
}
8255
if (CMSTraceSweeper) {
8256
gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8257
_limit);
8258
}
8259
}
8260
#endif // PRODUCT
8261
8262
void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8263
bool freeRangeInFreeLists) {
8264
if (CMSTraceSweeper) {
8265
gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8266
freeFinger, freeRangeInFreeLists);
8267
}
8268
assert(!inFreeRange(), "Trampling existing free range");
8269
set_inFreeRange(true);
8270
set_lastFreeRangeCoalesced(false);
8271
8272
set_freeFinger(freeFinger);
8273
set_freeRangeInFreeLists(freeRangeInFreeLists);
8274
if (CMSTestInFreeList) {
8275
if (freeRangeInFreeLists) {
8276
FreeChunk* fc = (FreeChunk*) freeFinger;
8277
assert(fc->is_free(), "A chunk on the free list should be free.");
8278
assert(fc->size() > 0, "Free range should have a size");
8279
assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8280
}
8281
}
8282
}
8283
8284
// Note that the sweeper runs concurrently with mutators. Thus,
8285
// it is possible for direct allocation in this generation to happen
8286
// in the middle of the sweep. Note that the sweeper also coalesces
8287
// contiguous free blocks. Thus, unless the sweeper and the allocator
8288
// synchronize appropriately freshly allocated blocks may get swept up.
8289
// This is accomplished by the sweeper locking the free lists while
8290
// it is sweeping. Thus blocks that are determined to be free are
8291
// indeed free. There is however one additional complication:
8292
// blocks that have been allocated since the final checkpoint and
8293
// mark, will not have been marked and so would be treated as
8294
// unreachable and swept up. To prevent this, the allocator marks
8295
// the bit map when allocating during the sweep phase. This leads,
8296
// however, to a further complication -- objects may have been allocated
8297
// but not yet initialized -- in the sense that the header isn't yet
8298
// installed. The sweeper cannot then determine the size of the block
8299
// in order to skip over it. To deal with this case, we use a technique
8300
// (due to Printezis) to encode such uninitialized block sizes in the
8301
// bit map. Since the bit map uses a bit per every HeapWord, but the
8302
// CMS generation has a minimum object size of 3 HeapWords, it follows
8303
// that "normal marks" won't be adjacent in the bit map (there will
8304
// always be at least two 0 bits between successive 1 bits). We make use
8305
// of these "unused" bits to represent uninitialized blocks -- the bit
8306
// corresponding to the start of the uninitialized object and the next
8307
// bit are both set. Finally, a 1 bit marks the end of the object that
8308
// started with the two consecutive 1 bits to indicate its potentially
8309
// uninitialized state.
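// Worked example of the encoding described above (added for illustration,
// not part of the original comment): for an uninitialized block that
// starts at word k and is 5 words long, bits are set at k, k+1 and k+4
// (start mark, "uninitialized" mark, end-of-block mark), so the size can
// be recovered as (k+4) + 1 - k = 5 words without reading the object
// header, which may not have been installed yet; this is what
// block_size_using_printezis_bits() relies on.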
8310
8311
size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8312
FreeChunk* fc = (FreeChunk*)addr;
8313
size_t res;
8314
8315
// Check if we are done sweeping. Below we check "addr >= _limit" rather
8316
// than "addr == _limit" because although _limit was a block boundary when
8317
// we started the sweep, it may no longer be one because heap expansion
8318
// may have caused us to coalesce the block ending at the address _limit
8319
// with a newly expanded chunk (this happens when _limit was set to the
8320
// previous _end of the space), so we may have stepped past _limit:
8321
// see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8322
if (addr >= _limit) { // we have swept up to or past the limit: finish up
8323
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8324
"sweep _limit out of bounds");
8325
assert(addr < _sp->end(), "addr out of bounds");
8326
// Flush any free range we might be holding as a single
8327
// coalesced chunk to the appropriate free list.
8328
if (inFreeRange()) {
8329
assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8330
err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", freeFinger()));
8331
flush_cur_free_chunk(freeFinger(),
8332
pointer_delta(addr, freeFinger()));
8333
if (CMSTraceSweeper) {
8334
gclog_or_tty->print("Sweep: last chunk: ");
8335
gclog_or_tty->print("put_free_blk 0x%x (" SIZE_FORMAT ") "
8336
"[coalesced:" SIZE_FORMAT "]\n",
8337
freeFinger(), pointer_delta(addr, freeFinger()),
8338
lastFreeRangeCoalesced());
8339
}
8340
}
8341
8342
// help the iterator loop finish
8343
return pointer_delta(_sp->end(), addr);
8344
}
8345
8346
assert(addr < _limit, "sweep invariant");
8347
// check if we should yield
8348
do_yield_check(addr);
8349
if (fc->is_free()) {
8350
// Chunk that is already free
8351
res = fc->size();
8352
do_already_free_chunk(fc);
8353
debug_only(_sp->verifyFreeLists());
8354
// If we flush the chunk at hand in lookahead_and_flush()
8355
// and it's coalesced with a preceding chunk, then the
8356
// process of "mangling" the payload of the coalesced block
8357
// will cause erasure of the size information from the
8358
// (erstwhile) header of all the coalesced blocks but the
8359
// first, so the first disjunct in the assert will not hold
8360
// in that specific case (in which case the second disjunct
8361
// will hold).
8362
assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8363
"Otherwise the size info doesn't change at this step");
8364
NOT_PRODUCT(
8365
_numObjectsAlreadyFree++;
8366
_numWordsAlreadyFree += res;
8367
)
8368
NOT_PRODUCT(_last_fc = fc;)
8369
} else if (!_bitMap->isMarked(addr)) {
8370
// Chunk is fresh garbage
8371
res = do_garbage_chunk(fc);
8372
debug_only(_sp->verifyFreeLists());
8373
NOT_PRODUCT(
8374
_numObjectsFreed++;
8375
_numWordsFreed += res;
8376
)
8377
} else {
8378
// Chunk that is alive.
8379
res = do_live_chunk(fc);
8380
debug_only(_sp->verifyFreeLists());
8381
NOT_PRODUCT(
8382
_numObjectsLive++;
8383
_numWordsLive += res;
8384
)
8385
}
8386
return res;
8387
}
8388
8389
// For the smart allocation, record following
8390
// split deaths - a free chunk is removed from its free list because
8391
// it is being split into two or more chunks.
8392
// split birth - a free chunk is being added to its free list because
8393
// a larger free chunk has been split and resulted in this free chunk.
8394
// coal death - a free chunk is being removed from its free list because
8395
// it is being coalesced into a large free chunk.
8396
// coal birth - a free chunk is being added to its free list because
8397
// it was created when two or more free chunks were coalesced into
8398
// this free chunk.
8399
//
8400
// These statistics are used to determine the desired number of free
8401
// chunks of a given size. The desired number is chosen to be relative
8402
// to the end of a CMS sweep. The desired number at the end of a sweep
8403
// is the
8404
// count-at-end-of-previous-sweep (an amount that was enough)
8405
// - count-at-beginning-of-current-sweep (the excess)
8406
// + split-births (gains in this size during interval)
8407
// - split-deaths (demands on this size during interval)
8408
// where the interval is from the end of one sweep to the end of the
8409
// next.
8410
//
8411
// When sweeping the sweeper maintains an accumulated chunk which is
8412
// the chunk that is made up of chunks that have been coalesced. That
8413
// will be termed the left-hand chunk. A new chunk of garbage that
8414
// is being considered for coalescing will be referred to as the
8415
// right-hand chunk.
8416
//
8417
// When making a decision on whether to coalesce a right-hand chunk with
8418
// the current left-hand chunk, the current count vs. the desired count
8419
// of the left-hand chunk is considered. Also if the right-hand chunk
8420
// is near the large chunk at the end of the heap (see
8421
// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8422
// left-hand chunk is coalesced.
8423
//
8424
// When making a decision about whether to split a chunk, the desired count
8425
// vs. the current count of the candidate to be split is also considered.
8426
// If the candidate is underpopulated (currently fewer chunks than desired)
8427
// a chunk of an overpopulated (currently more chunks than desired) size may
8428
// be chosen. The "hint" associated with a free list, if non-null, points
8429
// to a free list which may be overpopulated.
8430
//
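// Worked example of the bookkeeping above (numbers invented purely for
// illustration): if a given size had 100 free chunks at the end of the
// previous sweep, 40 of them remain at the start of this sweep, and the
// interval saw 25 split births and 10 split deaths, then the desired
// count carried into the coming sweep is 100 - 40 + 25 - 10 = 75 chunks
// of that size.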
8431
8432
void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8433
const size_t size = fc->size();
8434
// Chunks that cannot be coalesced are not in the
8435
// free lists.
8436
if (CMSTestInFreeList && !fc->cantCoalesce()) {
8437
assert(_sp->verify_chunk_in_free_list(fc),
8438
"free chunk should be in free lists");
8439
}
8440
// a chunk that is already free, should not have been
8441
// marked in the bit map
8442
HeapWord* const addr = (HeapWord*) fc;
8443
assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8444
// Verify that the bit map has no bits marked between
8445
// addr and purported end of this block.
8446
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8447
8448
// Some chunks cannot be coalesced under any circumstances.
8449
// See the definition of cantCoalesce().
8450
if (!fc->cantCoalesce()) {
8451
// This chunk can potentially be coalesced.
8452
if (_sp->adaptive_freelists()) {
8453
// All the work is done in
8454
do_post_free_or_garbage_chunk(fc, size);
8455
} else { // Not adaptive free lists
8456
// this is a free chunk that can potentially be coalesced by the sweeper;
8457
if (!inFreeRange()) {
8458
// if the next chunk is a free block that can't be coalesced
8459
// it doesn't make sense to remove this chunk from the free lists
8460
FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8461
assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8462
if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
8463
nextChunk->is_free() && // ... which is free...
8464
nextChunk->cantCoalesce()) { // ... but can't be coalesced
8465
// nothing to do
8466
} else {
8467
// Potentially the start of a new free range:
8468
// Don't eagerly remove it from the free lists.
8469
// No need to remove it if it will just be put
8470
// back again. (Also from a pragmatic point of view
8471
// if it is a free block in a region that is beyond
8472
// any allocated blocks, an assertion will fail)
8473
// Remember the start of a free run.
8474
initialize_free_range(addr, true);
8475
// end - can coalesce with next chunk
8476
}
8477
} else {
8478
// In the midst of a free range; we are coalescing.
8479
print_free_block_coalesced(fc);
8480
if (CMSTraceSweeper) {
8481
gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8482
}
8483
// remove it from the free lists
8484
_sp->removeFreeChunkFromFreeLists(fc);
8485
set_lastFreeRangeCoalesced(true);
8486
// If the chunk is being coalesced and the current free range is
8487
// in the free lists, remove the current free range so that it
8488
// will be returned to the free lists in its entirety - all
8489
// the coalesced pieces included.
8490
if (freeRangeInFreeLists()) {
8491
FreeChunk* ffc = (FreeChunk*) freeFinger();
8492
assert(ffc->size() == pointer_delta(addr, freeFinger()),
8493
"Size of free range is inconsistent with chunk size.");
8494
if (CMSTestInFreeList) {
8495
assert(_sp->verify_chunk_in_free_list(ffc),
8496
"free range is not in free lists");
8497
}
8498
_sp->removeFreeChunkFromFreeLists(ffc);
8499
set_freeRangeInFreeLists(false);
8500
}
8501
}
8502
}
8503
// Note that if the chunk is not coalescable (the else arm
8504
// below), we unconditionally flush, without needing to do
8505
// a "lookahead," as we do below.
8506
if (inFreeRange()) lookahead_and_flush(fc, size);
8507
} else {
8508
// Code path common to both original and adaptive free lists.
8509
8510
// can't coalesce with the previous block; this should be treated
8511
// as the end of a free run if any
8512
if (inFreeRange()) {
8513
// we kicked some butt; time to pick up the garbage
8514
assert(freeFinger() < addr, "freeFinger points too high");
8515
flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8516
}
8517
// else, nothing to do, just continue
8518
}
8519
}
8520
8521
size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8522
// This is a chunk of garbage. It is not in any free list.
8523
// Add it to a free list or let it possibly be coalesced into
8524
// a larger chunk.
8525
HeapWord* const addr = (HeapWord*) fc;
8526
const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8527
8528
if (_sp->adaptive_freelists()) {
8529
// Verify that the bit map has no bits marked between
8530
// addr and purported end of just dead object.
8531
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8532
8533
do_post_free_or_garbage_chunk(fc, size);
8534
} else {
8535
if (!inFreeRange()) {
8536
// start of a new free range
8537
assert(size > 0, "A free range should have a size");
8538
initialize_free_range(addr, false);
8539
} else {
8540
// this will be swept up when we hit the end of the
8541
// free range
8542
if (CMSTraceSweeper) {
8543
gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8544
}
8545
// If the chunk is being coalesced and the current free range is
8546
// in the free lists, remove the current free range so that it
8547
// will be returned to the free lists in its entirety - all
8548
// the coalesced pieces included.
8549
if (freeRangeInFreeLists()) {
8550
FreeChunk* ffc = (FreeChunk*)freeFinger();
8551
assert(ffc->size() == pointer_delta(addr, freeFinger()),
8552
"Size of free range is inconsistent with chunk size.");
8553
if (CMSTestInFreeList) {
8554
assert(_sp->verify_chunk_in_free_list(ffc),
8555
"free range is not in free lists");
8556
}
8557
_sp->removeFreeChunkFromFreeLists(ffc);
8558
set_freeRangeInFreeLists(false);
8559
}
8560
set_lastFreeRangeCoalesced(true);
8561
}
8562
// this will be swept up when we hit the end of the free range
8563
8564
// Verify that the bit map has no bits marked between
8565
// addr and purported end of just dead object.
8566
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8567
}
8568
assert(_limit >= addr + size,
8569
"A freshly garbage chunk can't possibly straddle over _limit");
8570
if (inFreeRange()) lookahead_and_flush(fc, size);
8571
return size;
8572
}
8573
8574
size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8575
HeapWord* addr = (HeapWord*) fc;
8576
// The sweeper has just found a live object. Return any accumulated
8577
// left hand chunk to the free lists.
8578
if (inFreeRange()) {
8579
assert(freeFinger() < addr, "freeFinger points too high");
8580
flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8581
}
8582
8583
// This object is live: we'd normally expect this to be
8584
// an oop, and would like to assert the following:
8585
// assert(oop(addr)->is_oop(), "live block should be an oop");
8586
// However, as we commented above, this may be an object whose
8587
// header hasn't yet been initialized.
8588
size_t size;
8589
assert(_bitMap->isMarked(addr), "Tautology for this control point");
8590
if (_bitMap->isMarked(addr + 1)) {
8591
// Determine the size from the bit map, rather than trying to
8592
// compute it from the object header.
8593
HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8594
size = pointer_delta(nextOneAddr + 1, addr);
8595
assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8596
"alignment problem");
8597
8598
#ifdef ASSERT
8599
if (oop(addr)->klass_or_null_acquire() != NULL) {
8600
// Ignore mark word because we are running concurrent with mutators
8601
assert(oop(addr)->is_oop(true), "live block should be an oop");
8602
assert(size ==
8603
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8604
"P-mark and computed size do not agree");
8605
}
8606
#endif
8607
8608
} else {
8609
// This should be an initialized object that's alive.
8610
assert(oop(addr)->klass_or_null_acquire() != NULL,
8611
"Should be an initialized object");
8612
// Ignore mark word because we are running concurrent with mutators
8613
assert(oop(addr)->is_oop(true), "live block should be an oop");
8614
// Verify that the bit map has no bits marked between
8615
// addr and purported end of this block.
8616
size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8617
assert(size >= 3, "Necessary for Printezis marks to work");
8618
assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8619
DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8620
}
8621
return size;
8622
}
8623
8624
void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8625
size_t chunkSize) {
8626
// do_post_free_or_garbage_chunk() should only be called in the case
8627
// of the adaptive free list allocator.
8628
const bool fcInFreeLists = fc->is_free();
8629
assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8630
assert((HeapWord*)fc <= _limit, "sweep invariant");
8631
if (CMSTestInFreeList && fcInFreeLists) {
8632
assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8633
}
8634
8635
if (CMSTraceSweeper) {
8636
gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8637
}
8638
8639
HeapWord* const fc_addr = (HeapWord*) fc;
8640
8641
bool coalesce = false;
8642
const size_t left = pointer_delta(fc_addr, freeFinger());
8643
const size_t right = chunkSize;
8644
switch (FLSCoalescePolicy) {
8645
// numeric value forms a coalescing aggressiveness metric
8646
case 0: { // never coalesce
8647
coalesce = false;
8648
break;
8649
}
8650
case 1: { // coalesce if left & right chunks on overpopulated lists
8651
coalesce = _sp->coalOverPopulated(left) &&
8652
_sp->coalOverPopulated(right);
8653
break;
8654
}
8655
case 2: { // coalesce if left chunk on overpopulated list (default)
8656
coalesce = _sp->coalOverPopulated(left);
8657
break;
8658
}
8659
case 3: { // coalesce if left OR right chunk on overpopulated list
8660
coalesce = _sp->coalOverPopulated(left) ||
8661
_sp->coalOverPopulated(right);
8662
break;
8663
}
8664
case 4: { // always coalesce
8665
coalesce = true;
8666
break;
8667
}
8668
default:
8669
ShouldNotReachHere();
8670
}
8671
8672
// Should the current free range be coalesced?
8673
// If the chunk is in a free range and either we decided to coalesce above
8674
// or the chunk is near the large block at the end of the heap
8675
// (isNearLargestChunk() returns true), then coalesce this chunk.
8676
const bool doCoalesce = inFreeRange()
8677
&& (coalesce || _g->isNearLargestChunk(fc_addr));
8678
if (doCoalesce) {
8679
// Coalesce the current free range on the left with the new
8680
// chunk on the right. If either is on a free list,
8681
// it must be removed from the list and stashed in the closure.
8682
if (freeRangeInFreeLists()) {
8683
FreeChunk* const ffc = (FreeChunk*)freeFinger();
8684
assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8685
"Size of free range is inconsistent with chunk size.");
8686
if (CMSTestInFreeList) {
8687
assert(_sp->verify_chunk_in_free_list(ffc),
8688
"Chunk is not in free lists");
8689
}
8690
_sp->coalDeath(ffc->size());
8691
_sp->removeFreeChunkFromFreeLists(ffc);
8692
set_freeRangeInFreeLists(false);
8693
}
8694
if (fcInFreeLists) {
8695
_sp->coalDeath(chunkSize);
8696
assert(fc->size() == chunkSize,
8697
"The chunk has the wrong size or is not in the free lists");
8698
_sp->removeFreeChunkFromFreeLists(fc);
8699
}
8700
set_lastFreeRangeCoalesced(true);
8701
print_free_block_coalesced(fc);
8702
} else { // not in a free range and/or should not coalesce
8703
// Return the current free range and start a new one.
8704
if (inFreeRange()) {
8705
// In a free range but cannot coalesce with the right hand chunk.
8706
// Put the current free range into the free lists.
8707
flush_cur_free_chunk(freeFinger(),
8708
pointer_delta(fc_addr, freeFinger()));
8709
}
8710
// Set up for new free range. Pass along whether the right hand
8711
// chunk is in the free lists.
8712
initialize_free_range((HeapWord*)fc, fcInFreeLists);
8713
}
8714
}
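// Illustrative sketch only (not used above): the FLSCoalescePolicy switch in
// do_post_free_or_garbage_chunk() restated as an isolated predicate. The two
// booleans stand in for _sp->coalOverPopulated(left) / coalOverPopulated(right).
static bool sketch_should_coalesce(int fls_coalesce_policy,
                                   bool left_overpopulated,
                                   bool right_overpopulated) {
  switch (fls_coalesce_policy) {
    case 0:  return false;                                      // never coalesce
    case 1:  return left_overpopulated && right_overpopulated;  // both lists overpopulated
    case 2:  return left_overpopulated;                         // left overpopulated (default)
    case 3:  return left_overpopulated || right_overpopulated;  // either overpopulated
    case 4:  return true;                                       // always coalesce
    default: return false;                                      // unexpected policy value
  }
}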
8715
8716
// Lookahead flush:
8717
// If we are tracking a free range, and this is the last chunk that
8718
// we'll look at because its end crosses past _limit, we'll preemptively
8719
// flush it along with any free range we may be holding on to. Note that
8720
// this can be the case only for an already free or freshly garbage
8721
// chunk. If this block is an object, it can never straddle
8722
// over _limit. The "straddling" occurs when _limit is set at
8723
// the previous end of the space when this cycle started, and
8724
// a subsequent heap expansion caused the previously co-terminal
8725
// free block to be coalesced with the newly expanded portion,
8726
// thus rendering _limit a non-block-boundary making it dangerous
8727
// for the sweeper to step over and examine.
8728
void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8729
assert(inFreeRange(), "Should only be called if currently in a free range.");
8730
HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8731
assert(_sp->used_region().contains(eob - 1),
8732
err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
8733
" out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8734
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8735
eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
8736
if (eob >= _limit) {
8737
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8738
if (CMSTraceSweeper) {
8739
gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8740
"[" PTR_FORMAT "," PTR_FORMAT ") in space "
8741
"[" PTR_FORMAT "," PTR_FORMAT ")",
8742
_limit, fc, eob, _sp->bottom(), _sp->end());
8743
}
8744
// Return the storage we are tracking back into the free lists.
8745
if (CMSTraceSweeper) {
8746
gclog_or_tty->print_cr("Flushing ... ");
8747
}
8748
assert(freeFinger() < eob, "Error");
8749
flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
8750
}
8751
}
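// Illustrative sketch only: the "reached or crossed" test that triggers the
// preemptive flush above. A chunk starting at fc_addr with chunk_size words
// reaches or straddles _limit exactly when its end-of-block address is at or
// beyond the limit.
static bool sketch_reaches_or_crosses_limit(HeapWord* fc_addr,
                                            size_t    chunk_size,
                                            HeapWord* limit) {
  HeapWord* const eob = fc_addr + chunk_size;  // one past the last word of the chunk
  return eob >= limit;
}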
8752
8753
void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8754
assert(inFreeRange(), "Should only be called if currently in a free range.");
8755
assert(size > 0,
8756
"A zero sized chunk cannot be added to the free lists.");
8757
if (!freeRangeInFreeLists()) {
8758
if (CMSTestInFreeList) {
8759
FreeChunk* fc = (FreeChunk*) chunk;
8760
fc->set_size(size);
8761
assert(!_sp->verify_chunk_in_free_list(fc),
8762
"chunk should not be in free lists yet");
8763
}
8764
if (CMSTraceSweeper) {
8765
gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8766
chunk, size);
8767
}
8768
// A new free range is going to be starting. The current
8769
// free range has not been added to the free lists yet or
8770
// was removed so add it back.
8771
// If the current free range was coalesced, then the death
8772
// of the free range was recorded. Record a birth now.
8773
if (lastFreeRangeCoalesced()) {
8774
_sp->coalBirth(size);
8775
}
8776
_sp->addChunkAndRepairOffsetTable(chunk, size,
8777
lastFreeRangeCoalesced());
8778
} else if (CMSTraceSweeper) {
8779
gclog_or_tty->print_cr("Already in free list: nothing to flush");
8780
}
8781
set_inFreeRange(false);
8782
set_freeRangeInFreeLists(false);
8783
}
8784
8785
// We take a break if we've been at this for a while,
8786
// so as to avoid monopolizing the locks involved.
8787
void SweepClosure::do_yield_work(HeapWord* addr) {
8788
// Return current free chunk being used for coalescing (if any)
8789
// to the appropriate freelist. After yielding, the next
8790
// free block encountered will start a coalescing range of
8791
// free blocks. If the next free block is adjacent to the
8792
// chunk just flushed, they will need to wait for the next
8793
// sweep to be coalesced.
8794
if (inFreeRange()) {
8795
flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8796
}
8797
8798
// First give up the locks, then yield, then re-lock.
8799
// We should probably use a constructor/destructor idiom to
8800
// do this unlock/lock or modify the MutexUnlocker class to
8801
// serve our purpose. XXX
8802
assert_lock_strong(_bitMap->lock());
8803
assert_lock_strong(_freelistLock);
8804
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8805
"CMS thread should hold CMS token");
8806
_bitMap->lock()->unlock();
8807
_freelistLock->unlock();
8808
ConcurrentMarkSweepThread::desynchronize(true);
8809
ConcurrentMarkSweepThread::acknowledge_yield_request();
8810
_collector->stopTimer();
8811
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8812
if (PrintCMSStatistics != 0) {
8813
_collector->incrementYields();
8814
}
8815
_collector->icms_wait();
8816
8817
// See the comment in coordinator_yield()
8818
for (unsigned i = 0; i < CMSYieldSleepCount &&
8819
ConcurrentMarkSweepThread::should_yield() &&
8820
!CMSCollector::foregroundGCIsActive(); ++i) {
8821
os::sleep(Thread::current(), 1, false);
8822
ConcurrentMarkSweepThread::acknowledge_yield_request();
8823
}
8824
8825
ConcurrentMarkSweepThread::synchronize(true);
8826
_freelistLock->lock();
8827
_bitMap->lock()->lock_without_safepoint_check();
8828
_collector->startTimer();
8829
}
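// Illustrative sketch only: the constructor/destructor idiom alluded to in the
// comment above. This hypothetical scoped helper would release both locks on
// entry and reacquire them, in the same order as do_yield_work(), on exit. It
// is not part of the collector; the runtime's MutexUnlocker does not cover
// this two-lock, no-safepoint-check pattern.
class SketchSweepYieldUnlocker : public StackObj {
  CMSBitMap* const _bit_map;
  Mutex*     const _freelist_lock;
 public:
  SketchSweepYieldUnlocker(CMSBitMap* bit_map, Mutex* freelist_lock)
    : _bit_map(bit_map), _freelist_lock(freelist_lock) {
    _bit_map->lock()->unlock();
    _freelist_lock->unlock();
  }
  ~SketchSweepYieldUnlocker() {
    _freelist_lock->lock();
    _bit_map->lock()->lock_without_safepoint_check();
  }
};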
8830
8831
#ifndef PRODUCT
8832
// This is actually very useful in a product build if it can
8833
// be called from the debugger. Compile it into the product
8834
// as needed.
8835
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8836
return debug_cms_space->verify_chunk_in_free_list(fc);
8837
}
8838
#endif
8839
8840
void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8841
if (CMSTraceSweeper) {
8842
gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8843
fc, fc->size());
8844
}
8845
}
8846
8847
// CMSIsAliveClosure
8848
bool CMSIsAliveClosure::do_object_b(oop obj) {
8849
HeapWord* addr = (HeapWord*)obj;
8850
return addr != NULL &&
8851
(!_span.contains(addr) || _bit_map->isMarked(addr));
8852
}
8853
8854
8855
CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8856
MemRegion span,
8857
CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8858
bool cpc):
8859
_collector(collector),
8860
_span(span),
8861
_bit_map(bit_map),
8862
_mark_stack(mark_stack),
8863
_concurrent_precleaning(cpc) {
8864
assert(!_span.is_empty(), "Empty span could spell trouble");
8865
}
8866
8867
8868
// CMSKeepAliveClosure: the serial version
8869
void CMSKeepAliveClosure::do_oop(oop obj) {
8870
HeapWord* addr = (HeapWord*)obj;
8871
if (_span.contains(addr) &&
8872
!_bit_map->isMarked(addr)) {
8873
_bit_map->mark(addr);
8874
bool simulate_overflow = false;
8875
NOT_PRODUCT(
8876
if (CMSMarkStackOverflowALot &&
8877
_collector->simulate_overflow()) {
8878
// simulate a stack overflow
8879
simulate_overflow = true;
8880
}
8881
)
8882
if (simulate_overflow || !_mark_stack->push(obj)) {
8883
if (_concurrent_precleaning) {
8884
// We dirty the overflown object and let the remark
8885
// phase deal with it.
8886
assert(_collector->overflow_list_is_empty(), "Error");
8887
// In the case of object arrays, we need to dirty all of
8888
// the cards that the object spans. No locking or atomics
8889
// are needed since no one else can be mutating the mod union
8890
// table.
8891
if (obj->is_objArray()) {
8892
size_t sz = obj->size();
8893
HeapWord* end_card_addr =
8894
(HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8895
MemRegion redirty_range = MemRegion(addr, end_card_addr);
8896
assert(!redirty_range.is_empty(), "Arithmetical tautology");
8897
_collector->_modUnionTable.mark_range(redirty_range);
8898
} else {
8899
_collector->_modUnionTable.mark(addr);
8900
}
8901
_collector->_ser_kac_preclean_ovflw++;
8902
} else {
8903
_collector->push_on_overflow_list(obj);
8904
_collector->_ser_kac_ovflw++;
8905
}
8906
}
8907
}
8908
}
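// Illustrative sketch only: the card-range arithmetic used above when an
// object array overflows the mark stack during precleaning. The range to
// redirty runs from the start of the object to its end rounded up to the
// next card boundary (CardTableModRefBS::card_size bytes, 512 by default).
static MemRegion sketch_redirty_range_for(HeapWord* obj_addr,
                                          size_t    obj_size_in_words) {
  HeapWord* const end_card_addr =
    (HeapWord*) round_to((intptr_t)(obj_addr + obj_size_in_words),
                         CardTableModRefBS::card_size);
  return MemRegion(obj_addr, end_card_addr);
}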
8909
8910
void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8911
void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8912
8913
// CMSParKeepAliveClosure: a parallel version of the above.
8914
// The work queues are private to each closure (thread),
8915
// but (may be) available for stealing by other threads.
8916
void CMSParKeepAliveClosure::do_oop(oop obj) {
8917
HeapWord* addr = (HeapWord*)obj;
8918
if (_span.contains(addr) &&
8919
!_bit_map->isMarked(addr)) {
8920
// In general, during recursive tracing, several threads
8921
// may be concurrently getting here; the first one to
8922
// "tag" it, claims it.
8923
if (_bit_map->par_mark(addr)) {
8924
bool res = _work_queue->push(obj);
8925
assert(res, "Low water mark should be much less than capacity");
8926
// Do a recursive trim in the hope that this will keep
8927
// stack usage lower, but leave some oops for potential stealers
8928
trim_queue(_low_water_mark);
8929
} // Else, another thread got there first
8930
}
8931
}
8932
8933
void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8934
void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8935
8936
void CMSParKeepAliveClosure::trim_queue(uint max) {
8937
while (_work_queue->size() > max) {
8938
oop new_oop;
8939
if (_work_queue->pop_local(new_oop)) {
8940
assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8941
assert(_bit_map->isMarked((HeapWord*)new_oop),
8942
"no white objects on this stack!");
8943
assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8944
// iterate over the oops in this oop, marking and pushing
8945
// the ones in CMS heap (i.e. in _span).
8946
new_oop->oop_iterate(&_mark_and_push);
8947
}
8948
}
8949
}
8950
8951
CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8952
CMSCollector* collector,
8953
MemRegion span, CMSBitMap* bit_map,
8954
OopTaskQueue* work_queue):
8955
_collector(collector),
8956
_span(span),
8957
_bit_map(bit_map),
8958
_work_queue(work_queue) { }
8959
8960
void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8961
HeapWord* addr = (HeapWord*)obj;
8962
if (_span.contains(addr) &&
8963
!_bit_map->isMarked(addr)) {
8964
if (_bit_map->par_mark(addr)) {
8965
bool simulate_overflow = false;
8966
NOT_PRODUCT(
8967
if (CMSMarkStackOverflowALot &&
8968
_collector->par_simulate_overflow()) {
8969
// simulate a stack overflow
8970
simulate_overflow = true;
8971
}
8972
)
8973
if (simulate_overflow || !_work_queue->push(obj)) {
8974
_collector->par_push_on_overflow_list(obj);
8975
_collector->_par_kac_ovflw++;
8976
}
8977
} // Else another thread got there already
8978
}
8979
}
8980
8981
void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8982
void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8983
8984
//////////////////////////////////////////////////////////////////
8985
// CMSExpansionCause /////////////////////////////
8986
//////////////////////////////////////////////////////////////////
8987
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8988
switch (cause) {
8989
case _no_expansion:
8990
return "No expansion";
8991
case _satisfy_free_ratio:
8992
return "Free ratio";
8993
case _satisfy_promotion:
8994
return "Satisfy promotion";
8995
case _satisfy_allocation:
8996
return "allocation";
8997
case _allocate_par_lab:
8998
return "Par LAB";
8999
case _allocate_par_spooling_space:
9000
return "Par Spooling Space";
9001
case _adaptive_size_policy:
9002
return "Ergonomics";
9003
default:
9004
return "unknown";
9005
}
9006
}
9007
9008
void CMSDrainMarkingStackClosure::do_void() {
9009
// the max number to take from overflow list at a time
9010
const size_t num = _mark_stack->capacity()/4;
9011
assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
9012
"Overflow list should be NULL during concurrent phases");
9013
while (!_mark_stack->isEmpty() ||
9014
// if stack is empty, check the overflow list
9015
_collector->take_from_overflow_list(num, _mark_stack)) {
9016
oop obj = _mark_stack->pop();
9017
HeapWord* addr = (HeapWord*)obj;
9018
assert(_span.contains(addr), "Should be within span");
9019
assert(_bit_map->isMarked(addr), "Should be marked");
9020
assert(obj->is_oop(), "Should be an oop");
9021
obj->oop_iterate(_keep_alive);
9022
}
9023
}
9024
9025
void CMSParDrainMarkingStackClosure::do_void() {
9026
// drain queue
9027
trim_queue(0);
9028
}
9029
9030
// Trim our work_queue so its length is below max at return
9031
void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
9032
while (_work_queue->size() > max) {
9033
oop new_oop;
9034
if (_work_queue->pop_local(new_oop)) {
9035
assert(new_oop->is_oop(), "Expected an oop");
9036
assert(_bit_map->isMarked((HeapWord*)new_oop),
9037
"no white objects on this stack!");
9038
assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9039
// iterate over the oops in this oop, marking and pushing
9040
// the ones in CMS heap (i.e. in _span).
9041
new_oop->oop_iterate(&_mark_and_push);
9042
}
9043
}
9044
}
9045
9046
////////////////////////////////////////////////////////////////////
9047
// Support for Marking Stack Overflow list handling and related code
9048
////////////////////////////////////////////////////////////////////
9049
// Much of the following code is similar in shape and spirit to the
9050
// code used in ParNewGC. We should try and share that code
9051
// as much as possible in the future.
9052
9053
#ifndef PRODUCT
9054
// Debugging support for CMSStackOverflowALot
9055
9056
// It's OK to call this multi-threaded; the worst thing
9057
// that can happen is that we'll get a bunch of closely
9058
// spaced simulated overflows, but that's OK, in fact
9059
// probably good as it would exercise the overflow code
9060
// under contention.
9061
bool CMSCollector::simulate_overflow() {
9062
if (_overflow_counter-- <= 0) { // just being defensive
9063
_overflow_counter = CMSMarkStackOverflowInterval;
9064
return true;
9065
} else {
9066
return false;
9067
}
9068
}
9069
9070
bool CMSCollector::par_simulate_overflow() {
9071
return simulate_overflow();
9072
}
9073
#endif
9074
9075
// Single-threaded
9076
bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9077
assert(stack->isEmpty(), "Expected precondition");
9078
assert(stack->capacity() > num, "Shouldn't bite more than can chew");
9079
size_t i = num;
9080
oop cur = _overflow_list;
9081
const markOop proto = markOopDesc::prototype();
9082
NOT_PRODUCT(ssize_t n = 0;)
9083
for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9084
next = oop(cur->mark());
9085
cur->set_mark(proto); // until proven otherwise
9086
assert(cur->is_oop(), "Should be an oop");
9087
bool res = stack->push(cur);
9088
assert(res, "Bit off more than can chew?");
9089
NOT_PRODUCT(n++;)
9090
}
9091
_overflow_list = cur;
9092
#ifndef PRODUCT
9093
assert(_num_par_pushes >= n, "Too many pops?");
9094
_num_par_pushes -= n;
9095
#endif
9096
return !stack->isEmpty();
9097
}
9098
9099
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
9100
// (MT-safe) Get a prefix of at most "num" from the list.
9101
// The overflow list is chained through the mark word of
9102
// each object in the list. We fetch the entire list,
9103
// break off a prefix of the right size and return the
9104
// remainder. If other threads try to take objects from
9105
// the overflow list at that time, they will wait for
9106
// some time to see if data becomes available. If (and
9107
// only if) another thread places one or more object(s)
9108
// on the global list before we have returned the suffix
9109
// to the global list, we will walk down our local list
9110
// to find its end and append the global list to
9111
// our suffix before returning it. This suffix walk can
9112
// prove to be expensive (quadratic in the amount of traffic)
9113
// when there are many objects in the overflow list and
9114
// there is much producer-consumer contention on the list.
9115
// *NOTE*: The overflow list manipulation code here and
9116
// in ParNewGeneration:: are very similar in shape,
9117
// except that in the ParNew case we use the old (from/eden)
9118
// copy of the object to thread the list via its klass word.
9119
// Because of the common code, if you make any changes in
9120
// the code below, please check the ParNew version to see if
9121
// similar changes might be needed.
9122
// CR 6797058 has been filed to consolidate the common code.
9123
bool CMSCollector::par_take_from_overflow_list(size_t num,
9124
OopTaskQueue* work_q,
9125
int no_of_gc_threads) {
9126
assert(work_q->size() == 0, "First empty local work queue");
9127
assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9128
if (_overflow_list == NULL) {
9129
return false;
9130
}
9131
// Grab the entire list; we'll put back a suffix
9132
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9133
Thread* tid = Thread::current();
9134
// Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
9135
// set to ParallelGCThreads.
9136
size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9137
size_t sleep_time_millis = MAX2((size_t)1, num/100);
9138
// If the list is busy, we spin for a short while,
9139
// sleeping between attempts to get the list.
9140
for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9141
os::sleep(tid, sleep_time_millis, false);
9142
if (_overflow_list == NULL) {
9143
// Nothing left to take
9144
return false;
9145
} else if (_overflow_list != BUSY) {
9146
// Try and grab the prefix
9147
prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9148
}
9149
}
9150
// If the list was found to be empty, or we spun long
9151
// enough, we give up and return empty-handed. If we leave
9152
// the list in the BUSY state below, it must be the case that
9153
// some other thread holds the overflow list and will set it
9154
// to a non-BUSY state in the future.
9155
if (prefix == NULL || prefix == BUSY) {
9156
// Nothing to take or waited long enough
9157
if (prefix == NULL) {
9158
// Write back the NULL in case we overwrote it with BUSY above
9159
// and it is still the same value.
9160
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9161
}
9162
return false;
9163
}
9164
assert(prefix != NULL && prefix != BUSY, "Error");
9165
size_t i = num;
9166
oop cur = prefix;
9167
// Walk down the first "num" objects, unless we reach the end.
9168
for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9169
if (cur->mark() == NULL) {
9170
// We have "num" or fewer elements in the list, so there
9171
// is nothing to return to the global list.
9172
// Write back the NULL in lieu of the BUSY we wrote
9173
// above, if it is still the same value.
9174
if (_overflow_list == BUSY) {
9175
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9176
}
9177
} else {
9178
// Chop off the suffix and return it to the global list.
9179
assert(cur->mark() != BUSY, "Error");
9180
oop suffix_head = cur->mark(); // suffix will be put back on global list
9181
cur->set_mark(NULL); // break off suffix
9182
// It's possible that the list is still in the empty(busy) state
9183
// we left it in a short while ago; in that case we may be
9184
// able to place back the suffix without incurring the cost
9185
// of a walk down the list.
9186
oop observed_overflow_list = _overflow_list;
9187
oop cur_overflow_list = observed_overflow_list;
9188
bool attached = false;
9189
while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9190
observed_overflow_list =
9191
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9192
if (cur_overflow_list == observed_overflow_list) {
9193
attached = true;
9194
break;
9195
} else cur_overflow_list = observed_overflow_list;
9196
}
9197
if (!attached) {
9198
// Too bad, someone else sneaked in (at least) an element; we'll need
9199
// to do a splice. Find tail of suffix so we can prepend suffix to global
9200
// list.
9201
for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9202
oop suffix_tail = cur;
9203
assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9204
"Tautology");
9205
observed_overflow_list = _overflow_list;
9206
do {
9207
cur_overflow_list = observed_overflow_list;
9208
if (cur_overflow_list != BUSY) {
9209
// Do the splice ...
9210
suffix_tail->set_mark(markOop(cur_overflow_list));
9211
} else { // cur_overflow_list == BUSY
9212
suffix_tail->set_mark(NULL);
9213
}
9214
// ... and try to place spliced list back on overflow_list ...
9215
observed_overflow_list =
9216
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9217
} while (cur_overflow_list != observed_overflow_list);
9218
// ... until we have succeeded in doing so.
9219
}
9220
}
9221
9222
// Push the prefix elements on work_q
9223
assert(prefix != NULL, "control point invariant");
9224
const markOop proto = markOopDesc::prototype();
9225
oop next;
9226
NOT_PRODUCT(ssize_t n = 0;)
9227
for (cur = prefix; cur != NULL; cur = next) {
9228
next = oop(cur->mark());
9229
cur->set_mark(proto); // until proven otherwise
9230
assert(cur->is_oop(), "Should be an oop");
9231
bool res = work_q->push(cur);
9232
assert(res, "Bit off more than we can chew?");
9233
NOT_PRODUCT(n++;)
9234
}
9235
#ifndef PRODUCT
9236
assert(_num_par_pushes >= n, "Too many pops?");
9237
Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9238
#endif
9239
return true;
9240
}
9241
9242
// Single-threaded
9243
void CMSCollector::push_on_overflow_list(oop p) {
9244
NOT_PRODUCT(_num_par_pushes++;)
9245
assert(p->is_oop(), "Not an oop");
9246
preserve_mark_if_necessary(p);
9247
p->set_mark((markOop)_overflow_list);
9248
_overflow_list = p;
9249
}
9250
9251
// Multi-threaded; use CAS to prepend to overflow list
9252
void CMSCollector::par_push_on_overflow_list(oop p) {
9253
NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9254
assert(p->is_oop(), "Not an oop");
9255
par_preserve_mark_if_necessary(p);
9256
oop observed_overflow_list = _overflow_list;
9257
oop cur_overflow_list;
9258
do {
9259
cur_overflow_list = observed_overflow_list;
9260
if (cur_overflow_list != BUSY) {
9261
p->set_mark(markOop(cur_overflow_list));
9262
} else {
9263
p->set_mark(NULL);
9264
}
9265
observed_overflow_list =
9266
(oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9267
} while (cur_overflow_list != observed_overflow_list);
9268
}
9269
#undef BUSY
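// Illustrative sketch only: the BUSY-sentinel prepend protocol used by
// par_push_on_overflow_list(), restated over a generic intrusive list with
// C++11 std::atomic (assumed available for this standalone illustration; the
// collector itself threads the list through object mark words and uses
// Atomic::cmpxchg_ptr). If the observed head is the BUSY sentinel, the new
// node temporarily terminates the list; the thread that installed BUSY will
// splice its suffix back in later.
#include <atomic>

struct SketchNode { SketchNode* next; };
static SketchNode* const SKETCH_BUSY = reinterpret_cast<SketchNode*>(0x1);

static void sketch_par_push(std::atomic<SketchNode*>& head, SketchNode* p) {
  SketchNode* observed = head.load();
  for (;;) {
    // Link to the observed head, or terminate the list if it is BUSY.
    p->next = (observed != SKETCH_BUSY) ? observed : NULL;
    // On failure, compare_exchange updates 'observed' and we retry.
    if (head.compare_exchange_weak(observed, p)) {
      return;
    }
  }
}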
9270
9271
// Single threaded
9272
// General Note on GrowableArray: pushes may silently fail
9273
// because we are (temporarily) out of C-heap for expanding
9274
// the stack. The problem is quite ubiquitous and affects
9275
// a lot of code in the JVM. The prudent thing for GrowableArray
9276
// to do (for now) is to exit with an error. However, that may
9277
// be too draconian in some cases because the caller may be
9278
// able to recover without much harm. For such cases, we
9279
// should probably introduce a "soft_push" method which returns
9280
// an indication of success or failure with the assumption that
9281
// the caller may be able to recover from a failure; code in
9282
// the VM can then be changed, incrementally, to deal with such
9283
// failures where possible, thus, incrementally hardening the VM
9284
// in such low resource situations.
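// Illustrative sketch only: the hypothetical "soft_push" interface suggested
// by the note above. Nothing like this exists in GrowableArray today; the name
// and semantics here are assumptions, shown only to make the proposal concrete
// (and failure is modeled as a full fixed-capacity buffer rather than as C-heap
// exhaustion): the caller is told about the failure instead of the VM exiting,
// and can then attempt to recover.
template <typename T, size_t CAP>
class SketchSoftStack {
  T      _data[CAP];
  size_t _len;
 public:
  SketchSoftStack() : _len(0) {}
  // Returns false instead of aborting when no more room is available.
  bool soft_push(const T& elem) {
    if (_len == CAP) return false;   // caller decides how to recover
    _data[_len++] = elem;
    return true;
  }
  size_t length() const { return _len; }
};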
9285
void CMSCollector::preserve_mark_work(oop p, markOop m) {
9286
_preserved_oop_stack.push(p);
9287
_preserved_mark_stack.push(m);
9288
assert(m == p->mark(), "Mark word changed");
9289
assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9290
"bijection");
9291
}
9292
9293
// Single threaded
9294
void CMSCollector::preserve_mark_if_necessary(oop p) {
9295
markOop m = p->mark();
9296
if (m->must_be_preserved(p)) {
9297
preserve_mark_work(p, m);
9298
}
9299
}
9300
9301
void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9302
markOop m = p->mark();
9303
if (m->must_be_preserved(p)) {
9304
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9305
// Even though we read the mark word without holding
9306
// the lock, we are assured that it will not change
9307
// because we "own" this oop, so no other thread can
9308
// be trying to push it on the overflow list; see
9309
// the assertion in preserve_mark_work() that checks
9310
// that m == p->mark().
9311
preserve_mark_work(p, m);
9312
}
9313
}
9314
9315
// We should be able to do this multi-threaded,
9316
// a chunk of stack being a task (this is
9317
// correct because each oop only ever appears
9318
// once in the overflow list). However, it's
9319
// not very easy to completely overlap this with
9320
// other operations, so will generally not be done
9321
// until all work's been completed. Because we
9322
// expect the preserved oop stack (set) to be small,
9323
// it's probably fine to do this single-threaded.
9324
// We can explore cleverer concurrent/overlapped/parallel
9325
// processing of preserved marks if we feel the
9326
// need for this in the future. Stack overflow should
9327
// be so rare in practice and, when it happens, its
9328
// effect on performance so great that this will
9329
// likely just be in the noise anyway.
9330
void CMSCollector::restore_preserved_marks_if_any() {
9331
assert(SafepointSynchronize::is_at_safepoint(),
9332
"world should be stopped");
9333
assert(Thread::current()->is_ConcurrentGC_thread() ||
9334
Thread::current()->is_VM_thread(),
9335
"should be single-threaded");
9336
assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9337
"bijection");
9338
9339
while (!_preserved_oop_stack.is_empty()) {
9340
oop p = _preserved_oop_stack.pop();
9341
assert(p->is_oop(), "Should be an oop");
9342
assert(_span.contains(p), "oop should be in _span");
9343
assert(p->mark() == markOopDesc::prototype(),
9344
"Set when taken from overflow list");
9345
markOop m = _preserved_mark_stack.pop();
9346
p->set_mark(m);
9347
}
9348
assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9349
"stacks were cleared above");
9350
}
9351
9352
#ifndef PRODUCT
9353
bool CMSCollector::no_preserved_marks() const {
9354
return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9355
}
9356
#endif
9357
9358
CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9359
{
9360
GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9361
CMSAdaptiveSizePolicy* size_policy =
9362
(CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9363
assert(size_policy->is_gc_cms_adaptive_size_policy(),
9364
"Wrong type for size policy");
9365
return size_policy;
9366
}
9367
9368
void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9369
size_t desired_promo_size) {
9370
if (cur_promo_size < desired_promo_size) {
9371
size_t expand_bytes = desired_promo_size - cur_promo_size;
9372
if (PrintAdaptiveSizePolicy && Verbose) {
9373
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9374
"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9375
expand_bytes);
9376
}
9377
expand(expand_bytes,
9378
MinHeapDeltaBytes,
9379
CMSExpansionCause::_adaptive_size_policy);
9380
} else if (desired_promo_size < cur_promo_size) {
9381
size_t shrink_bytes = cur_promo_size - desired_promo_size;
9382
if (PrintAdaptiveSizePolicy && Verbose) {
9383
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9384
"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9385
shrink_bytes);
9386
}
9387
shrink(shrink_bytes);
9388
}
9389
}
9390
9391
CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9392
GenCollectedHeap* gch = GenCollectedHeap::heap();
9393
CMSGCAdaptivePolicyCounters* counters =
9394
(CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9395
assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9396
"Wrong kind of counters");
9397
return counters;
9398
}
9399
9400
9401
void ASConcurrentMarkSweepGeneration::update_counters() {
9402
if (UsePerfData) {
9403
_space_counters->update_all();
9404
_gen_counters->update_all();
9405
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9406
GenCollectedHeap* gch = GenCollectedHeap::heap();
9407
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9408
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9409
"Wrong gc statistics type");
9410
counters->update_counters(gc_stats_l);
9411
}
9412
}
9413
9414
void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9415
if (UsePerfData) {
9416
_space_counters->update_used(used);
9417
_space_counters->update_capacity();
9418
_gen_counters->update_all();
9419
9420
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9421
GenCollectedHeap* gch = GenCollectedHeap::heap();
9422
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9423
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9424
"Wrong gc statistics type");
9425
counters->update_counters(gc_stats_l);
9426
}
9427
}
9428
9429
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9430
assert_locked_or_safepoint(Heap_lock);
9431
assert_lock_strong(freelistLock());
9432
HeapWord* old_end = _cmsSpace->end();
9433
HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9434
assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9435
FreeChunk* chunk_at_end = find_chunk_at_end();
9436
if (chunk_at_end == NULL) {
9437
// No room to shrink
9438
if (PrintGCDetails && Verbose) {
9439
gclog_or_tty->print_cr("No room to shrink: old_end "
9440
PTR_FORMAT " unallocated_start " PTR_FORMAT
9441
" chunk_at_end " PTR_FORMAT,
9442
old_end, unallocated_start, chunk_at_end);
9443
}
9444
return;
9445
} else {
9446
9447
// Find the chunk at the end of the space and determine
9448
// how much it can be shrunk.
9449
size_t shrinkable_size_in_bytes = chunk_at_end->size();
9450
size_t aligned_shrinkable_size_in_bytes =
9451
align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9452
assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9453
"Inconsistent chunk at end of space");
9454
size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9455
size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9456
9457
// Shrink the underlying space
9458
_virtual_space.shrink_by(bytes);
9459
if (PrintGCDetails && Verbose) {
9460
gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9461
" desired_bytes " SIZE_FORMAT
9462
" shrinkable_size_in_bytes " SIZE_FORMAT
9463
" aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9464
" bytes " SIZE_FORMAT,
9465
desired_bytes, shrinkable_size_in_bytes,
9466
aligned_shrinkable_size_in_bytes, bytes);
9467
gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
9468
" unallocated_start " SIZE_FORMAT,
9469
old_end, unallocated_start);
9470
}
9471
9472
// If the space did shrink (shrinking is not guaranteed),
9473
// shrink the chunk at the end by the appropriate amount.
9474
if (((HeapWord*)_virtual_space.high()) < old_end) {
9475
size_t new_word_size =
9476
heap_word_size(_virtual_space.committed_size());
9477
9478
// Have to remove the chunk from the dictionary because it is changing
9479
// size and might end up somewhere else in the dictionary.
9480
9481
// Get the chunk at end, shrink it, and put it
9482
// back.
9483
_cmsSpace->removeChunkFromDictionary(chunk_at_end);
9484
size_t word_size_change = word_size_before - new_word_size;
9485
size_t chunk_at_end_old_size = chunk_at_end->size();
9486
assert(chunk_at_end_old_size >= word_size_change,
9487
"Shrink is too large");
9488
chunk_at_end->set_size(chunk_at_end_old_size -
9489
word_size_change);
9490
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9491
word_size_change);
9492
9493
_cmsSpace->returnChunkToDictionary(chunk_at_end);
9494
9495
MemRegion mr(_cmsSpace->bottom(), new_word_size);
9496
_bts->resize(new_word_size); // resize the block offset shared array
9497
Universe::heap()->barrier_set()->resize_covered_region(mr);
9498
_cmsSpace->assert_locked();
9499
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
9500
9501
NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9502
9503
// update the space and generation capacity counters
9504
if (UsePerfData) {
9505
_space_counters->update_capacity();
9506
_gen_counters->update_all();
9507
}
9508
9509
if (Verbose && PrintGCDetails) {
9510
size_t new_mem_size = _virtual_space.committed_size();
9511
size_t old_mem_size = new_mem_size + bytes;
9512
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9513
name(), old_mem_size/K, bytes/K, new_mem_size/K);
9514
}
9515
}
9516
9517
assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9518
"Inconsistency at end of space");
9519
assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9520
"Shrinking is inconsistent");
9521
return;
9522
}
9523
}
9524
// Transfer some number of overflown objects to usual marking
9525
// stack. Return true if some objects were transferred.
9526
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9527
size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9528
(size_t)ParGCDesiredObjsFromOverflowList);
9529
9530
bool res = _collector->take_from_overflow_list(num, _mark_stack);
9531
assert(_collector->overflow_list_is_empty() || res,
9532
"If list is not empty, we should have taken something");
9533
assert(!res || !_mark_stack->isEmpty(),
9534
"If we took something, it should now be on our stack");
9535
return res;
9536
}
9537
9538
size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9539
size_t res = _sp->block_size_no_stall(addr, _collector);
9540
if (_sp->block_is_obj(addr)) {
9541
if (_live_bit_map->isMarked(addr)) {
9542
// It can't have been dead in a previous cycle
9543
guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9544
} else {
9545
_dead_bit_map->mark(addr); // mark the dead object
9546
}
9547
}
9548
// Could be 0, if the block size could not be computed without stalling.
9549
return res;
9550
}
9551
9552
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9553
9554
switch (phase) {
9555
case CMSCollector::InitialMarking:
9556
initialize(true /* fullGC */ ,
9557
cause /* cause of the GC */,
9558
true /* allMemoryPoolsAffected */,
9559
true /* recordGCBeginTime */,
9560
true /* recordPreGCUsage */,
9561
false /* recordPeakUsage */,
9562
false /* recordPostGCusage */,
9563
true /* recordAccumulatedGCTime */,
9564
false /* recordGCEndTime */,
9565
false /* countCollection */ );
9566
break;
9567
9568
case CMSCollector::FinalMarking:
9569
initialize(true /* fullGC */ ,
9570
cause /* cause of the GC */,
9571
true /* allMemoryPoolsAffected */,
9572
false /* recordGCBeginTime */,
9573
false /* recordPreGCUsage */,
9574
false /* recordPeakUsage */,
9575
false /* recordPostGCusage */,
9576
true /* recordAccumulatedGCTime */,
9577
false /* recordGCEndTime */,
9578
false /* countCollection */ );
9579
break;
9580
9581
case CMSCollector::Sweeping:
9582
initialize(true /* fullGC */ ,
9583
cause /* cause of the GC */,
9584
true /* allMemoryPoolsAffected */,
9585
false /* recordGCBeginTime */,
9586
false /* recordPreGCUsage */,
9587
true /* recordPeakUsage */,
9588
true /* recordPostGCusage */,
9589
false /* recordAccumulatedGCTime */,
9590
true /* recordGCEndTime */,
9591
true /* countCollection */ );
9592
break;
9593
9594
default:
9595
ShouldNotReachHere();
9596
}
9597
}
9598
9599