Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/z/zCollectedHeap.cpp
40957 views
1
/*
2
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*/
23
24
#include "precompiled.hpp"
25
#include "classfile/classLoaderData.hpp"
26
#include "gc/shared/gcHeapSummary.hpp"
27
#include "gc/shared/suspendibleThreadSet.hpp"
28
#include "gc/z/zCollectedHeap.hpp"
29
#include "gc/z/zDirector.hpp"
30
#include "gc/z/zDriver.hpp"
31
#include "gc/z/zGlobals.hpp"
32
#include "gc/z/zHeap.inline.hpp"
33
#include "gc/z/zNMethod.hpp"
34
#include "gc/z/zObjArrayAllocator.hpp"
35
#include "gc/z/zOop.inline.hpp"
36
#include "gc/z/zServiceability.hpp"
37
#include "gc/z/zStat.hpp"
38
#include "gc/z/zUtils.inline.hpp"
39
#include "memory/classLoaderMetaspace.hpp"
40
#include "memory/iterator.hpp"
41
#include "memory/universe.hpp"
42
#include "utilities/align.hpp"
43
44
// Returns the singleton ZCollectedHeap, looked up via the generic
// CollectedHeap registry under the Z heap kind.
ZCollectedHeap* ZCollectedHeap::heap() {
  return named_heap<ZCollectedHeap>(CollectedHeap::Z);
}
47
48
// Constructs the ZGC heap wrapper. Members are initialized in declaration
// order; note that _initialize receives a pointer to _barrier_set, which is
// listed (and therefore constructed) before it. The director, driver and
// stat threads are allocated here and handed out via gc_threads_do().
ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}
57
58
// Identifies this heap implementation as ZGC.
CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}
61
62
const char* ZCollectedHeap::name() const {
63
return ZName;
64
}
65
66
// Completes heap initialization. Reports JNI_ENOMEM if the underlying
// ZHeap failed to initialize (e.g. could not reserve address space).
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  // ZGC objects can live anywhere in the address space, so the verify
  // range covers all of it.
  Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX);

  return JNI_OK;
}
75
76
void ZCollectedHeap::initialize_serviceability() {
77
_heap.serviceability_initialize();
78
}
79
80
class ZStopConcurrentGCThreadClosure : public ThreadClosure {
81
public:
82
virtual void do_thread(Thread* thread) {
83
if (thread->is_ConcurrentGC_thread() &&
84
!thread->is_GC_task_thread()) {
85
static_cast<ConcurrentGCThread*>(thread)->stop();
86
}
87
}
88
};
89
90
void ZCollectedHeap::stop() {
91
ZStopConcurrentGCThreadClosure cl;
92
gc_threads_do(&cl);
93
}
94
95
// Accessor for the soft reference clearing policy.
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
98
99
// Maximum heap capacity in bytes, delegated to ZHeap.
size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}
102
103
// Current committed heap capacity in bytes, delegated to ZHeap.
size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}
106
107
// Bytes currently in use, delegated to ZHeap.
size_t ZCollectedHeap::used() const {
  return _heap.used();
}
110
111
// Bytes currently unused, delegated to ZHeap.
size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}
114
115
bool ZCollectedHeap::is_maximal_no_gc() const {
116
// Not supported
117
ShouldNotReachHere();
118
return false;
119
}
120
121
bool ZCollectedHeap::is_in(const void* p) const {
122
return _heap.is_in((uintptr_t)p);
123
}
124
125
// Computes a GC-stable hash for the given oop.
uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(ZOop::to_address(obj));
}
128
129
// Allocates a new TLAB of the requested size. On success the actual size
// (in words) is reported back through actual_size; on failure NULL is
// returned and actual_size is left untouched. min_size is unused because
// the heap either satisfies the full request or fails.
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t tlab_addr = _heap.alloc_tlab(bytes);

  if (tlab_addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)tlab_addr;
}
139
140
// Allocates an object array. Non-zeroing allocations go through the
// generic CollectedHeap path; zeroing allocations use ZObjArrayAllocator,
// which clears the array in segments to keep time-to-safepoint low.
oop ZCollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  if (!do_zero) {
    return CollectedHeap::array_allocate(klass, size, length, false /* do_zero */, THREAD);
  }

  ZObjArrayAllocator allocator(klass, size, length, THREAD);
  return allocator.allocate();
}
148
149
// Allocates an object of the given size (in words) outside a TLAB.
// The gc_overhead_limit flag is not used by ZGC.
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(bytes);
}
153
154
// Called after a metadata allocation failure. Tries progressively more
// aggressive strategies: asynchronous GC + metaspace expansion, then a
// synchronous soft-ref-clearing GC + plain retry, then one final
// expansion attempt. Returns NULL only when all of these fail.
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  // Kick off an asynchronous GC to reclaim metaspace.
  collect(GCCause::_metadata_GC_threshold);

  // Try to grow metaspace and allocate.
  MetaWord* allocation = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (allocation != NULL) {
    return allocation;
  }

  // Run a synchronous GC that also clears soft references.
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry the plain allocation.
  allocation = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (allocation != NULL) {
    return allocation;
  }

  // Last resort: try to grow metaspace once more.
  allocation = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (allocation != NULL) {
    return allocation;
  }

  // Out of memory.
  return NULL;
}
186
187
// Requests a GC cycle from the driver thread for the given cause.
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}
190
191
// Intentionally a no-op: ZGC cannot run a synchronous GC cycle from
// within the VM thread. This is benign, because the only causes that
// arrive here are the heap dumper and the heap inspector, and neither
// strictly needs a GC — at worst their heap iteration may observe
// objects that a GC would have reclaimed.
void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}
203
204
void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
205
// Not supported
206
ShouldNotReachHere();
207
}
208
209
// Global TLAB capacity; ZGC does not track this per thread.
size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}
212
213
// Global TLAB usage; ZGC does not track this per thread.
size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}
216
217
// Upper bound on TLAB size, delegated to ZHeap.
size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}
220
221
// Largest TLAB allocation currently possible without a GC; thread-agnostic.
size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}
224
225
// ZGC uses stack watermark barriers for concurrent stack processing.
bool ZCollectedHeap::uses_stack_watermark_barrier() const {
  return true;
}
228
229
// Returns the two memory managers exposed for serviceability:
// one covering concurrent cycles, one covering pauses.
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> managers(2);
  managers.append(_heap.serviceability_cycle_memory_manager());
  managers.append(_heap.serviceability_pause_memory_manager());
  return managers;
}
235
236
// Returns the single memory pool exposed for serviceability.
GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  GrowableArray<MemoryPool*> pools(1);
  pools.append(_heap.serviceability_memory_pool());
  return pools;
}
241
242
// Applies the closure to all objects, including weakly reachable ones.
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}
245
246
// Creates a parallel object iterator for nworkers, visiting weaks too.
ParallelObjectIterator* ZCollectedHeap::parallel_object_iterator(uint nworkers) {
  return _heap.parallel_object_iterator(nworkers, true /* visit_weaks */);
}
249
250
// Keeps the given object alive for the current GC cycle.
void ZCollectedHeap::keep_alive(oop obj) {
  _heap.keep_alive(obj);
}
253
254
// Registers a compiled method with the ZGC nmethod bookkeeping.
void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}
257
258
// Unregisters a compiled method from the ZGC nmethod bookkeeping.
void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}
261
262
// Flushes per-nmethod GC data when the nmethod is freed.
void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}
265
266
// Intentionally empty — ZGC performs no per-nmethod verification here.
void ZCollectedHeap::verify_nmethod(nmethod* nm) {
}
269
270
// Worker gang made available for safepoint-time parallel work.
WorkGang* ZCollectedHeap::safepoint_workers() {
  return _runtime_workers.workers();
}
273
274
// Applies the closure to every GC-related thread: the director, driver
// and stat threads, the heap's own threads, and the runtime workers.
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.threads_do(tc);
  _runtime_workers.threads_do(tc);
}
281
282
// Builds a virtual-space summary. ZGC reports the range [0, capacity)
// committed within [0, max_capacity) since its address layout does not
// map onto a single contiguous reservation.
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity());
}
285
286
void ZCollectedHeap::safepoint_synchronize_begin() {
287
SuspendibleThreadSet::synchronize();
288
}
289
290
void ZCollectedHeap::safepoint_synchronize_end() {
291
SuspendibleThreadSet::desynchronize();
292
}
293
294
// Intentionally empty — no preparation is needed before verification.
void ZCollectedHeap::prepare_for_verify() {
}
297
298
// Prints a heap summary to the given stream.
void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}
301
302
// Prints ZGC diagnostic state for error reports (hs_err): global phase and
// sequence number, address-space geometry, page sizes, and the colored
// pointer metadata masks, followed by the generic heap error output.
void ZCollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("ZGC Globals:");
  st->print_cr(" GlobalPhase:       %u (%s)", ZGlobalPhase, ZGlobalPhaseToString());
  st->print_cr(" GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr(" Offset Max:        " SIZE_FORMAT "%s (" PTR_FORMAT ")",
               byte_size_in_exact_unit(ZAddressOffsetMax),
               exact_unit_for_byte_size(ZAddressOffsetMax),
               ZAddressOffsetMax);
  st->print_cr(" Page Size Small:   " SIZE_FORMAT "M", ZPageSizeSmall / M);
  st->print_cr(" Page Size Medium:  " SIZE_FORMAT "M", ZPageSizeMedium / M);
  st->cr();
  st->print_cr("ZGC Metadata Bits:");
  st->print_cr(" Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr(" Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr(" WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr(" Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr(" Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
  st->cr();
  CollectedHeap::print_on_error(st);
}
322
323
// Prints an extended heap report to the given stream.
void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}
326
327
// Intentionally empty — ZGC has no extra tracing output here.
void ZCollectedHeap::print_tracing_info() const {
}
330
331
// Describes the given address for debugging; returns true if it was
// recognized as belonging to the ZGC heap.
bool ZCollectedHeap::print_location(outputStream* st, void* addr) const {
  return _heap.print_location(st, (uintptr_t)addr);
}
334
335
// Runs heap verification; the VerifyOption argument is ignored by ZGC.
void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}
338
339
// Checks whether the given value looks like a valid oop.
bool ZCollectedHeap::is_oop(oop object) const {
  return _heap.is_oop(ZOop::to_address(object));
}
342
343
// ZGC supports the concurrent GC breakpoint test mechanism.
bool ZCollectedHeap::supports_concurrent_gc_breakpoints() const {
  return true;
}
346
347