GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "utilities/stack.inline.hpp"

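// A ParCompactionManager holds the per-worker state used by the parallel
// old-generation compactor (PSParallelCompact): a marking stack of oops,
// a chunked objArray stack, and a stack of region indices to fill.
// The static members below are shared across all managers.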
PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;

RegionTaskQueue** ParCompactionManager::_region_list = NULL;

OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;

uint* ParCompactionManager::_recycled_stack_index = NULL;
int ParCompactionManager::_recycled_top = -1;
int ParCompactionManager::_recycled_bottom = -1;

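// Per-worker construction: default to CopyAndUpdate, leave the region stack
// unattached, and initialize this manager's marking and objArray stacks.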
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
}

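// Release the recycled-stack-index array allocated in initialize().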
ParCompactionManager::~ParCompactionManager() {
  delete _recycled_stack_index;
}

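// One-time static setup: allocate one compaction manager per GC worker plus
// one for the VMThread, create the shared task-queue sets, and register each
// worker's queues so that idle threads can steal work from them.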
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not allocate manager_array");

  _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
                                  parallel_gc_threads+1, mtGC);
  guarantee(_region_list != NULL, "Could not initialize promotion manager");

  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);

  // parallel_gc_threads + 1 to be consistent with the number of
  // compaction managers.
  for(uint i=0; i<parallel_gc_threads + 1; i++) {
    _region_list[i] = new RegionTaskQueue();
    region_list(i)->initialize();
  }

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, region_list(i));
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

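// Claim the next recycled region-stack index with a CAS on _recycled_bottom
// so concurrent callers receive distinct indices; returns -1 if none are
// available.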
int ParCompactionManager::pop_recycled_stack_index() {
  assert(_recycled_bottom <= _recycled_top, "list is empty");
  // Get the next available index
  if (_recycled_bottom < _recycled_top) {
    uint cur, next, last;
    do {
      cur = _recycled_bottom;
      next = cur + 1;
      last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
    } while (cur != last);
    return _recycled_stack_index[next];
  } else {
    return -1;
  }
}

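// Recycle a region-stack index: atomically bump _recycled_top and store the
// index there for a later pop_recycled_stack_index() call.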
void ParCompactionManager::push_recycled_stack_index(uint v) {
  // Get the next available index
  int cur = Atomic::add(1, &_recycled_top);
  _recycled_stack_index[cur] = v;
  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
}

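// Predicates on this manager's action(): whether it should update pointers,
// copy objects, or both.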
bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

void ParCompactionManager::region_list_push(uint list_index,
                                            size_t region_index) {
  region_list(list_index)->push(region_index);
}

void ParCompactionManager::verify_region_list_empty(uint list_index) {
  assert(region_list(list_index)->is_empty(), "Not empty");
}

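// Return the compaction manager assigned to the GC worker thread with the
// given index.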
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

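// Transitively mark everything reachable from this manager's marking stacks.
// The overflow stack is drained first so other workers can steal from the
// fixed-size local queue; objArrays are processed one chunk at a time to
// keep the marking stack from growing too large.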
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      obj->follow_contents(this);
    }
    while (marking_stack()->pop_local(obj)) {
      obj->follow_contents(this);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
      k->oop_follow_contents(this, task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

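// Fill and update every region queued on this manager's region stack, again
// draining the overflow stack first so other workers can steal.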
void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}