GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/generation.hpp
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENERATION_HPP
#define SHARE_VM_MEMORY_GENERATION_HPP

#include "gc_implementation/shared/collectorCounters.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.hpp"
#include "memory/watermark.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
#include "runtime/virtualspace.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                       - abstract base class
// - DefNewGeneration               - allocation area (copy collected)
//   - ParNewGeneration             - a DefNewGeneration that is collected by
//                                    several threads
// - CardGeneration                 - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration          - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration
//
//   ParNewGeneration + TenuredGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

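// For orientation (editorial note, not part of the original header): in a
// JDK 8 HotSpot build these configurations are normally selected with the
// collector flags, for example:
//
//   -XX:+UseSerialGC        -> DefNewGeneration + TenuredGeneration
//   -XX:+UseParNewGC        -> ParNewGeneration + TenuredGeneration
//   -XX:+UseConcMarkSweepGC -> ParNewGeneration + ConcurrentMarkSweepGeneration
//                              (DefNew + CMS if ParNew is explicitly disabled)
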
class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another. It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};

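// Illustrative sketch (editorial, not part of this header): a generation
// contributing a free range of HeapWords [start, end) as scratch would
// overlay the struct above on that range and link it onto the list, e.g.
//
//   ScratchBlock* sb = (ScratchBlock*)start;
//   sb->num_words = pointer_delta(end, start);  // total words, header included
//   sb->next = list;
//   list = sb;
//
// leaving num_words - 2 words usable as scratch_space, as noted above.
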
class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt() const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t used_stable() const;   // The number of used bytes for memory monitoring tools.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion. (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC. Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region() { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a subclass of
  // DefNewGeneration and return a DefNewGeneration*.
  DefNewGeneration* as_DefNewGeneration();

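  // Illustrative sketch (editorial, not part of this header): such a checked
  // downcast is typically guarded by kind(), along the lines of
  //
  //   assert(kind() == Generation::DefNew || kind() == Generation::ParNew ||
  //          kind() == Generation::ASParNew, "Wrong generation kind");
  //   return (DefNewGeneration*) this;
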
  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size. Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

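  // Editorial note (not part of the original header) on the overflow guard in
  // should_allocate() above: word counts below
  // (size_t)1 << (BitsPerSize_t - LogHeapWordSize) are exactly those whose
  // size in bytes still fits in a size_t. For example, on an LP64 VM
  // BitsPerSize_t is 64 and LogHeapWordSize is 3, so the limit is 2^61 words,
  // the largest request for which word_size << LogHeapWordSize cannot wrap.
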
  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen. The return value is a new limit, or NULL if none. The
  // caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (More precisely, this means the style of allocation that
  // increments *top_addr() with a CAS.) (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation. Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

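  // Illustrative sketch (editorial, not part of this header): a young
  // generation's copying phase typically falls back to promote() when its
  // to-space fills up, along these lines (handle_promotion_failure() is a
  // hypothetical name for whatever failure path the caller uses):
  //
  //   oop new_obj = _next_gen->promote(old, old->size());
  //   if (new_obj == NULL) {
  //     new_obj = handle_promotion_failure(old);
  //   }
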
  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz". If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*). Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset. Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset. Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC. This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation. See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block). If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection. The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again. The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected, or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source. So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, (int64_t) _time_of_last_gc, (int64_t) now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection. This
  // method updates those statistics. current_level is
  // the level of the collection that has most recently
  // occurred. This allows the generation to decide what
  // statistics are valid to collect. For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan. In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors. A NULL value indicates to the client that
  // no data recording is expected by the provider. The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections. For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks". That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization. (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL

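  // For orientation (editorial note, not generated by this header): for a
  // closure type such as ScanClosure the macro above expands to roughly
  //
  //   virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  //     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
  //   }
  //
  // which subclasses can override with a statically dispatched body.
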
  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. If
  // the target is not the requestor, no gc actions will be required
  // of the target. The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list. The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {};

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each. An object is safe if its references point to other objects in
  // the heap. This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations. Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation. [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved. [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;

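  // Illustrative sketch (editorial, not part of this header): together these
  // three functions support a linear walk over a parsable region, e.g.
  //
  //   HeapWord* p = bottom;            // hypothetical region bounds
  //   while (p < top) {
  //     if (block_is_obj(p)) {
  //       cl->do_object(oop(p));       // cl is some ObjectClosure*
  //     }
  //     p += block_size(p);
  //   }
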
  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  // current shrinking effect: this damps shrinking when the heap gets empty.
  size_t _shrink_factor;

  size_t _min_heap_delta_bytes;   // Minimum amount to expand.

  // Some statistics from before gc started.
  // These are gathered in the gc_prologue (and should_collect)
  // to control growing/shrinking policy in spite of promotions.
  size_t _capacity_at_prologue;
  size_t _used_at_prologue;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes". Expand by at least
  // "expand_bytes". Return true if some amount (not necessarily the
  // full "bytes") was done.
  virtual bool expand(size_t bytes, size_t expand_bytes);

  // Shrink the generation by the specified number of bytes.
  virtual void shrink(size_t bytes) = 0;

  virtual void compute_new_size();

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation by the specified number of bytes (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class VM_PopulateDumpSharedSpace;

 protected:
  ContiguousSpace* _the_space;   // actual space holding objects
  WaterMark _last_gc;            // watermark between objects allocated before
                                 // and after last GC.

  // Grow generation by the specified number of bytes (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink the generation by the specified number of bytes.
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify();
  virtual void print_on(outputStream* st) const;
};

#endif // SHARE_VM_MEMORY_GENERATION_HPP