Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
40957 views
1
/*
2
* Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
27
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
28
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
29
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
30
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
31
#include "gc/shenandoah/shenandoahUtils.hpp"
32
#include "runtime/atomic.hpp"
33
#include "services/memTracker.hpp"
34
#include "utilities/copy.hpp"
35
36
// Constructs the collection set and its backing in-cset byte map.
//
// The map is "biased": _biased_cset_map points at the base of the reserved
// space, and _cset_map is offset by (heap_base >> region_size_bytes_shift),
// so that indexing the biased map with (addr >> shift) works directly for
// any heap oop — and also for NULL, which lands on the committed zero page.
ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  // Bias the map base so that (heap address >> shift) indexes it correctly.
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and NULLs, freeing
  // high-performance code from checking for NULL first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.

  // Tag the reservation for Native Memory Tracking as GC memory.
  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = (size_t)os::vm_page_size();

  // "special" spaces are pre-committed (e.g. large pages); nothing to commit then.
  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  // Zero both committed views: no region is in the collection set initially,
  // and the zero page must answer "not in cset" for NULL checks.
  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}
81
82
// Adds a single region to the collection set: flags it in the in-cset map,
// folds its garbage/used statistics into the set totals, and flips the
// region state to cset. Only callable by the VM thread at a safepoint.
void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");

  // Accumulate set-wide statistics for this region.
  _garbage += r->garbage();
  _used += r->used();
  _region_count++;

  // Mark the region as a member in the fast-lookup byte map.
  _cset_map[r->index()] = 1;

  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}
94
95
void ShenandoahCollectionSet::clear() {
96
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
97
Copy::zero_to_bytes(_cset_map, _map_size);
98
99
#ifdef ASSERT
100
for (size_t index = 0; index < _heap->num_regions(); index ++) {
101
assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
102
}
103
#endif
104
105
_garbage = 0;
106
_used = 0;
107
108
_region_count = 0;
109
_current_index = 0;
110
}
111
112
// Concurrently claims the next collection-set region for a worker thread.
// Multiple GC workers race on _current_index via CAS; each successful CAS
// hands exactly one cset region to exactly one claimer. Returns NULL when
// no unclaimed cset regions remain.
ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      // Found a candidate: try to advance the claim index past it.
      // relaxed ordering suffices: we only need atomicity of the claim,
      // not ordering against other memory accesses.
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  // Scanned past the last region: nothing left to claim.
  return NULL;
}
136
137
// Single-threaded iterator over collection-set regions: returns the next
// cset region after the current cursor and advances the cursor past it,
// or NULL once the scan reaches the end of the heap. VM-thread-only
// counterpart of claim_next(), so no atomics are needed here.
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t num = _heap->num_regions();
  size_t idx = _current_index;
  while (idx < num) {
    if (is_in(idx)) {
      // Remember where to resume on the following call.
      _current_index = idx + 1;
      return _heap->get_region(idx);
    }
    idx++;
  }

  return NULL;
}
151
152
// Prints the collection set summary followed by every member region.
// In debug builds, cross-checks that the number of regions printed
// matches the recorded region count.
void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set : " SIZE_FORMAT "", count());

  debug_only(size_t printed = 0;)
  size_t num = _heap->num_regions();
  for (size_t i = 0; i < num; i++) {
    if (!is_in(i)) {
      continue;
    }
    _heap->get_region(i)->print_on(out);
    debug_only(printed++;)
  }
  assert(printed == count(), "Must match");
}
164
165