GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.cpp
/*
 * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and NULLs, freeing
  // high-performance code from checking for NULL first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
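
  // Illustrative sketch (not part of the original file): with this layout the
  // biased map turns an address-based membership test into a single load with
  // no NULL check, because address 0 falls on the committed, always-zero page.
  // The real accessors live in the collection set header; a hypothetical
  // equivalent would look roughly like:
  //
  //   inline bool in_cset_by_address(uintptr_t addr) const {
  //     return _biased_cset_map[addr >> _region_size_bytes_shift] == 1;
  //   }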

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = (size_t)os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = (char*)align_ptr_down(_cset_map, page_size);
    char* top_addr = (char*)align_ptr_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  _cset_map[r->index()] = 1;
  _region_count++;
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}
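
// Illustrative sketch (not part of the original file): as the asserts above
// show, the set is populated by the VM thread at a safepoint, once the
// heuristics have chosen their candidate regions. Assuming a hypothetical
// `cset` pointer and `candidates`/`num_candidates` picked by the heuristics:
//
//   for (size_t i = 0; i < num_candidates; i++) {
//     cset->add_region(candidates[i]);
//   }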

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  size_t num_regions = _heap->num_regions();
  if (_current_index >= (jint)num_regions) {
    return NULL;
  }

  jint saved_current = _current_index;
  size_t index = (size_t)saved_current;

  while(index < num_regions) {
    if (is_in(index)) {
      jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
      assert(cur >= (jint)saved_current, "Must move forward");
      if (cur == saved_current) {
        assert(is_in(index), "Invariant");
        return _heap->get_region(index);
      } else {
        index = (size_t)cur;
        saved_current = cur;
      }
    } else {
      index ++;
    }
  }
  return NULL;
}
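
// Illustrative sketch (not part of the original file): claim_next() uses CAS
// on _current_index so multiple GC worker threads can drain the collection set
// concurrently without extra locking, while next() below is the single-threaded
// variant reserved for the VM thread at a safepoint. A hypothetical worker loop
// (assuming a `cset` pointer to this collection set):
//
//   ShenandoahHeapRegion* r;
//   while ((r = cset->claim_next()) != NULL) {
//     ... // process (e.g. evacuate) live objects in r
//   }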

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  size_t num_regions = _heap->num_regions();
  for (size_t index = (size_t)_current_index; index < num_regions; index ++) {
    if (is_in(index)) {
      _current_index = (jint)(index + 1);
      return _heap->get_region(index);
    }
  }

  return NULL;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set : " SIZE_FORMAT "", count());

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}