GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/defNewGeneration.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/generation.inline.hpp"
#include "utilities/stack.hpp"

class EdenSpace;
class ContiguousSpace;
class ScanClosure;
class STWGCTimer;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _next_gen;
  uint _tenuring_threshold;   // Tenuring threshold for next collection.
  ageTable _age_table;
  // Size of object to pretenure in words; command line provides bytes
  size_t _pretenure_size_threshold_words;
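  // Example (illustrative): with -XX:PretenureSizeThreshold=1m the threshold is
  // 1048576 bytes, i.e. 131072 words on a 64-bit VM (HeapWordSize == 8);
  // non-TLAB requests of at least that many words fail should_allocate() below
  // and are pretenured into an older generation instead.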

  ageTable* age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool _promotion_failed;
  bool promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure. A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection. If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space. If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection. When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);
  void preserve_mark(oop obj, markOop m); // work routine used by the above

  // Together, these keep <object with a preserved mark, mark value> pairs.
  // They should always contain the same number of elements.
  Stack<oop, mtGC>     _objs_with_preserved_marks;
  Stack<markOop, mtGC> _preserved_marks_of_objs;

  // Promotion failure handling
  ExtendedOopClosure *_promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(ExtendedOopClosure *scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters* _gen_counters;
  CSpaceCounters* _eden_counters;
  CSpaceCounters* _from_counters;
  CSpaceCounters* _to_counters;

  // sizing information
  size_t _max_eden_size;
  size_t _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
  void adjust_desired_tenuring_threshold(GCTracer &tracer);

  // Spaces
  EdenSpace*       _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;

  enum SomeProtectedConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_size_down(n, alignment) : alignment;
  }
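  // Worked example (illustrative): with the default SurvivorRatio of 8, a
  // 10 MB young generation gives n = 10 MB / (8 + 2) = 1 MB per survivor
  // space, rounded down to the requested alignment (but never below one
  // alignment unit).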

public: // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _g;
  public:
    IsAliveClosure(Generation* g);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class EvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    ScanClosure* _scan_cur_or_nonheap;
    ScanClosure* _scan_older;
  public:
    EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             ScanClosure* cur, ScanClosure* older);
    void do_void();
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    DefNewGeneration* _gen;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                                 DefNewGeneration* gen,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };

public:
  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                   const char* policy="Copy");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  EdenSpace* eden() const { return _eden_space; }
  ContiguousSpace* from() const { return _from_space; }
  ContiguousSpace* to() const { return _to_space; }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const { return _max_eden_size; }
  size_t max_survivor_size() const { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
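  // Illustrative note: top_addr() and end_addr() are assumed to expose the
  // addresses of eden's top and end pointers so that callers (e.g. compiled
  // code performing inline contiguous allocation, per
  // supports_inline_contig_alloc() above) can bump-pointer allocate in eden.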

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The size in bytes is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void younger_refs_iterate(OopsInGenClosure* cl);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }
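  // Illustrative note: overflow_limit is the first word count whose size in
  // bytes no longer fits in a size_t. On a 64-bit VM (BitsPerSize_t == 64,
  // LogHeapWordSize == 3) it is 2^61 words, so the overflow check rejects
  // requests that would wrap around when converted to bytes.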

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Prologue & Epilogue
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Doesn't require additional work during GC prologue and epilogue
  virtual bool performs_in_place_marking() const { return false; }

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);

  ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)

#undef DefNew_SINCE_SAVE_MARKS_DECL
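  // For illustration, an instantiation such as
  // DefNew_SINCE_SAVE_MARKS_DECL(ScanClosure, _nv) expands to the declaration
  //   void oop_since_save_marks_iterate_nv(ScanClosure* cl);
  // ALL_SINCE_SAVE_MARKS_CLOSURES applies the macro once per closure type.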

  // For a non-youngest collection, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost. Overrides the superclass's implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }
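  // Illustrative note: during a scavenge, copy_to_survivor_space() is expected
  // to copy a live object into to-space while its age (tracked in _age_table)
  // is below _tenuring_threshold, and to promote it into _next_gen once its
  // age reaches the threshold; adjust_desired_tenuring_threshold() then
  // retunes the threshold from the age table after each collection.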

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  bool must_be_youngest() const { return true; }
  bool must_be_oldest() const { return false; }

  // PrintHeapAtGC support.
  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

protected:
  // If clear_space is true, clear the survivor spaces. Eden is
  // cleared if the minimum size of eden is 0. If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);
  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_VM_MEMORY_DEFNEWGENERATION_HPP