GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "oops/oop.inline.hpp"

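// Aligned size, in heap words, of an int-array object header. initialize()
// keeps this much space free at the end of every LAB so that flush() can
// always install a filler array; it is computed at runtime because the
// header size depends on compressed oops.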
size_t PSPromotionLAB::filler_header_size;

// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
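    // Debug builds poison the whole free portion of the LAB with badHeapWord
    // so that reads of words that were never allocated stand out.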
    if (ZapUnusedHeapArea) {
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  // If we were initialized to a zero sized lab, there is
  // nothing to flush
  if (_state == zero_size)
    return;

  // PLAB's never allocate the last aligned_header_size
  // so they can always fill with an array.
  HeapWord* tlab_end = end() + filler_header_size;
  typeArrayOop filler_oop = (typeArrayOop) top();
  filler_oop->set_mark(markOopDesc::prototype());
  filler_oop->set_klass(Universe::intArrayKlassObj());
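  // pointer_delta() yields the remaining space in heap words; typeArrayOop
  // lengths are counted in jint elements, so the value is scaled by
  // HeapWordSize / sizeof(jint) before being stored below.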
  const size_t array_length =
    pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
  assert( (array_length * (HeapWordSize/sizeof(jint))) < (size_t)max_jint, "array too big in PSPromotionLAB");
  filler_oop->set_length((int)(array_length * (HeapWordSize/sizeof(jint))));

#ifdef ASSERT
  // Note that we actually DO NOT want to use the aligned header size!
  HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
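  // Fill the filler array's payload with a recognizable pattern so that stray
  // references into the dead tail of the LAB are easy to spot in debug builds.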
  Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
#endif

  set_bottom(NULL);
  set_end(NULL);
  set_top(NULL);

  _state = flushed;
}

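// Roll back the most recent allocation from this LAB. Retraction is strictly
// LIFO: only the last allocation may be undone (the assert below checks that
// obj ends exactly at top()); on success top() is moved back to obj.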
bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
  assert(Universe::heap()->is_in(obj), "Object outside heap");

  if (contains(obj)) {
    HeapWord* object_end = obj + obj_size;
    assert(object_end == top(), "Not matching last allocation");

    set_top(obj);
    return true;
  }

  return false;
}

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSOldPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  if (_state == zero_size)
    return;

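  // Remember where the filler object will start; the base-class flush()
  // resets bottom, top and end to NULL.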
  HeapWord* obj = top();

  PSPromotionLAB::flush();

  assert(_start_array != NULL, "Sanity");

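  // Record the filler's start address in the old gen's ObjectStartArray so
  // that block-start lookups over this region stay correct.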
  _start_array->allocate_block(obj);
}

#ifdef ASSERT

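// A young PLAB is valid only if it lies entirely within the used portion of
// the young gen's to_space.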
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}

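// An old PLAB is valid only if it lies within the used portion of the old
// gen's object space and within the region covered by the start array.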
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(_start_array->covered_region().contains(lab), "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  if (used.contains(lab)) {
    return true;
  }

  return false;
}

#endif /* ASSERT */