GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/collectorPolicy.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();
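  // Editorial sketch (not part of the upstream header), assuming the usual
  // HotSpot singleton pattern: heap() returns the single ParallelScavengeHeap
  // instance, so callers typically write:
  //
  //   ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
  //   size_t young_used = psh->young_gen()->used_in_bytes();
  //
  // (young_gen()->used_in_bytes() is assumed from psYoungGen.hpp; treat the
  // exact accessor as illustrative.)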
  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas
  size_t space_alignment()      { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will be set to true
  // if the adaptive size policy determines that an excessive amount of time
  // is being spent doing collections and caused a NULL to be returned.  If a
  // NULL is not returned, "gc_overhead_limit_was_exceeded" has an undefined
  // meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
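  // Editorial sketch (hypothetical caller, not upstream code): the flag lets
  // the caller distinguish "allocation failed, retry or report OOM" from
  // "allocation deliberately refused because GC overhead is excessive":
  //
  //   bool limit_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size, &limit_exceeded);
  //   if (obj == NULL && limit_exceeded) {
  //     // adaptive policy judged collection overhead excessive;
  //     // surface an OutOfMemoryError rather than retrying
  //   }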
  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);
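  // Editorial sketch (hypothetical VM-operation code, not upstream): the
  // CollectionType enum above maps onto these two entry points roughly as:
  //
  //   if (type == ParallelScavengeHeap::Scavenge) {
  //     heap->invoke_scavenge();          // young gen; falls back to full gc on failure
  //   } else {                            // ParallelScavengeHeap::MarkSweep
  //     heap->do_full_collection(true /* clear_all_soft_refs */);  // entire heap
  //   }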
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
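  // Editorial note (an assumption based on the NUMA allocator design): with
  // -XX:+UseNUMA the eden is split into per-node spaces, so there is no single
  // contiguous top/end pair for compiled code to bump-allocate against;
  // (HeapWord**)-1 serves as an "unsupported" sentinel, consistent with
  // supports_inline_contig_alloc() returning false in that case.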
  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;
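  // Editorial sketch (illustrative, not upstream code): the shared TLAB
  // machinery consults these per-thread queries when refilling, along the
  // lines of:
  //
  //   size_t max_now = heap->unsafe_max_tlab_alloc(thread);  // space available right now
  //   size_t refill  = MIN2(desired_tlab_words, max_now);    // clamp the new TLAB size
  //
  // (desired_tlab_words is a hypothetical name for the policy-chosen size.)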
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
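  // Editorial sketch (illustrative, not upstream code): the scope is meant to
  // be stack-allocated around the root-processing phase, RAII style:
  //
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... process strong roots ...
  //   } // destructor ends the scope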
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP