GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/g1/g1AllocRegion.hpp
/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1ALLOCREGION_HPP
#define SHARE_GC_G1_G1ALLOCREGION_HPP

#include "gc/g1/heapRegion.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1NUMA.hpp"

class G1CollectedHeap;

// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The
// implementation assumes that fast-path allocations will be lock-free
// and a lock will need to be taken when the active region needs to be
// replaced.

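// An illustrative sketch (not the actual call sites, which live in the
// collector/allocator code) of the two-level protocol described above;
// the variable name and the lock shown here are placeholders:
//
//   HeapWord* result = alloc_region->attempt_allocation(word_size);
//   if (result == NULL) {
//     MutexLocker ml(Heap_lock);  // take the appropriate lock first
//     result = alloc_region->attempt_allocation_locked(word_size);
//   }
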
class G1AllocRegion : public CHeapObj<mtGC> {

private:
  // The active allocating region we are currently allocating out
  // of. The invariant is that if this object is initialized (i.e.,
  // init() has been called and release() has not) then _alloc_region
  // is either an active allocating region or the dummy region (i.e.,
  // it can never be NULL) and this object can be used to satisfy
  // allocation requests. If this object is not initialized
  // (i.e. init() has not been called or release() has been called)
  // then _alloc_region is NULL and this object should not be used to
  // satisfy allocation requests (it was done this way to force the
  // correct use of init() and release()).
  HeapRegion* volatile _alloc_region;

  // Keeps track of the number of distinct regions that are used
  // for allocation in the active interval of this object, i.e.,
  // between a call to init() and a call to release(). The count
  // mostly includes regions that are freshly allocated, as well as
  // the region that is re-used using the set() method. This count can
  // be used in any heuristics that might want to bound how many
  // distinct regions this object can use during an active interval.
  uint _count;

  // When we set up a new active region we save its used bytes in this
  // field so that, when we retire it, we can calculate how much space
  // we allocated in it.
  size_t _used_bytes_before;

  // When true, indicates that allocate calls should do BOT updates.
  const bool _bot_updates;

  // Useful for debugging and tracing.
  const char* _name;

  // A dummy region (i.e., it's been allocated specially for this
  // purpose and it is not part of the heap) that is full (i.e., top()
  // == end()). When we don't have a valid active region we make
  // _alloc_region point to this. This allows us to skip checking
  // whether the _alloc_region is NULL or not.
  static HeapRegion* _dummy_region;

  // After a region is allocated by allocate_new_region(), this
  // method is used to set it as the active alloc_region.
  void update_alloc_region(HeapRegion* alloc_region);

  // Allocate a new active region and use it to perform a word_size
  // allocation. The force parameter will be passed on to
  // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
  // to allocate a new region even if the max has been reached.
  HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

protected:
  // The memory node index this allocation region belongs to.
  uint _node_index;

  // Reset the alloc region to point to the dummy region.
  void reset_alloc_region();

  // Perform a non-MT-safe allocation out of the given region.
  inline HeapWord* allocate(HeapRegion* alloc_region,
                            size_t word_size);

  // Perform an MT-safe allocation out of the given region.
  inline HeapWord* par_allocate(HeapRegion* alloc_region,
                                size_t word_size);
  // Perform an MT-safe allocation out of the given region, with the given
  // minimum and desired size. Returns the actual size allocated (between
  // minimum and desired size) in actual_word_size if the allocation has been
  // successful.
  inline HeapWord* par_allocate(HeapRegion* alloc_region,
                                size_t min_word_size,
                                size_t desired_word_size,
                                size_t* actual_word_size);

  // Ensure that the region passed as a parameter has been filled up
  // so that no one else can allocate out of it any more.
  // Returns the number of bytes that have been wasted by filling up
  // the space.
  size_t fill_up_remaining_space(HeapRegion* alloc_region);

  // Retire the active allocating region. If fill_up is true then make
  // sure that the region is full before we retire it so that no one
  // else can allocate out of it.
  // Returns the number of bytes that have been filled up during retire.
  virtual size_t retire(bool fill_up);

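  // Retire the given region. Common code that is also usable for a region
  // that is not the current active one (e.g. a retained region).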
  size_t retire_internal(HeapRegion* alloc_region, bool fill_up);

  // For convenience as subclasses use it.
  static G1CollectedHeap* _g1h;

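  // Hooks implemented by the concrete subclasses below: allocate a new
  // region suitable for this allocation context, and hand a retired region
  // (together with the number of bytes allocated into it) back to the
  // collector.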
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(HeapRegion* alloc_region,
                             size_t allocated_bytes) = 0;

  G1AllocRegion(const char* name, bool bot_updates, uint node_index);

public:
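  // One-time initialization of the static state shared by all alloc region
  // objects (the G1CollectedHeap pointer and the dummy region); should be
  // called during heap setup before any G1AllocRegion is used.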
  static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);

  HeapRegion* get() const {
    HeapRegion* hr = _alloc_region;
    // Make sure that the dummy region does not escape this class.
    return (hr == _dummy_region) ? NULL : hr;
  }

  uint count() { return _count; }

  // The following two are the building blocks for the allocation method.

  // First-level allocation: Should be called without holding a
  // lock. It will try to allocate lock-free out of the active region,
  // or return NULL if it was unable to.
  inline HeapWord* attempt_allocation(size_t word_size);
  // Perform an allocation out of the current allocation region, with the given
  // minimum and desired size. Returns the actual size allocated (between
  // minimum and desired size) in actual_word_size if the allocation has been
  // successful.
  // Should be called without holding a lock. It will try to allocate lock-free
  // out of the active region, or return NULL if it was unable to.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size);

  // Second-level allocation: Should be called while holding a
  // lock. It will first try to allocate lock-free out of the active
  // region or, if it's unable to, it will try to replace the active
  // alloc region with a new one. We require that the caller takes the
  // appropriate lock before calling this so that it is easier to make
  // it conform to its locking protocol.
  inline HeapWord* attempt_allocation_locked(size_t word_size);
  // Same as attempt_allocation_locked(size_t), but allowing specification
  // of minimum word size of the block in min_word_size, and the maximum word
  // size of the allocation in desired_word_size. The actual size of the block is
  // returned in actual_word_size.
  inline HeapWord* attempt_allocation_locked(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_word_size);

  // Should be called to allocate a new region even if the max of this
  // type of regions has been reached. Should only be called if other
  // allocation attempts have failed and we are not holding a valid
  // active region.
  inline HeapWord* attempt_allocation_force(size_t word_size);

  // Should be called before we start using this object.
  virtual void init();

  // This can be used to set the active region to a specific
  // region. (Usage example: we try to retain the last old GC alloc
  // region that we've used during a GC and we can use set() to
  // re-instate it at the beginning of the next GC.)
  void set(HeapRegion* alloc_region);

  // Should be called when we want to release the active region, which
  // is returned after it's been retired.
  virtual HeapRegion* release();

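  // Trace an allocation attempt or a state change of this alloc region;
  // PRODUCT_RETURN makes this a no-op in product builds.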
  void trace(const char* str,
             size_t min_word_size = 0,
             size_t desired_word_size = 0,
             size_t actual_word_size = 0,
             HeapWord* result = NULL) PRODUCT_RETURN;
};

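// The allocation region used to satisfy allocations from mutator threads
// (e.g. TLAB refills and direct eden allocations) outside of GC pauses.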
class MutatorAllocRegion : public G1AllocRegion {
private:
  // Keeps track of the total waste generated during the current
  // mutator phase.
  size_t _wasted_bytes;

  // Retained allocation region. Used to lower the waste generated
  // during mutation by having two active regions if the free space
  // in a region about to be retired could still fit a TLAB.
  HeapRegion* volatile _retained_alloc_region;

  // Decide if the region should be retained, based on the free size
  // in it and the free size in the currently retained region, if any.
  bool should_retain(HeapRegion* region);
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
  virtual size_t retire(bool fill_up);
public:
  MutatorAllocRegion(uint node_index)
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */, node_index),
      _wasted_bytes(0),
      _retained_alloc_region(NULL) { }

  // Returns the combined used memory in the current alloc region and
  // the retained alloc region.
  size_t used_in_alloc_regions();

  // Perform an allocation out of the retained allocation region, with the given
  // minimum and desired size. Returns the actual size allocated (between
  // minimum and desired size) in actual_word_size if the allocation has been
  // successful.
  // Should be called without holding a lock. It will try to allocate lock-free
  // out of the retained region, or return NULL if it was unable to.
  inline HeapWord* attempt_retained_allocation(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_word_size);

  // This specialization of release() makes sure that the retained alloc
  // region is retired and set to NULL.
  virtual HeapRegion* release();

  virtual void init();
};

// Common base class for allocation regions used during GC.
class G1GCAllocRegion : public G1AllocRegion {
protected:
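  // Evacuation statistics collected for this allocation purpose, and the
  // region attribute (young vs. old) used when requesting and retiring
  // regions during GC.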
  G1EvacStats* _stats;
  G1HeapRegionAttr::region_type_t _purpose;

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);

  virtual size_t retire(bool fill_up);

  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats,
                  G1HeapRegionAttr::region_type_t purpose, uint node_index = G1NUMA::AnyNodeIndex)
    : G1AllocRegion(name, bot_updates, node_index), _stats(stats), _purpose(purpose) {
    assert(stats != NULL, "Must pass non-NULL PLAB statistics");
  }
};

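// GC allocation region used for objects evacuated into the survivor space
// during a pause.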
class SurvivorGCAllocRegion : public G1GCAllocRegion {
public:
  SurvivorGCAllocRegion(G1EvacStats* stats, uint node_index)
    : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young, node_index) { }
};

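// GC allocation region used for objects evacuated (promoted) into old
// regions during a pause; these allocations require BOT updates.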
class OldGCAllocRegion : public G1GCAllocRegion {
public:
  OldGCAllocRegion(G1EvacStats* stats)
    : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, G1HeapRegionAttr::Old) { }

  // This specialization of release() makes sure that the last card that has
  // been allocated into has been completely filled by a dummy object. This
  // avoids races between remembered set scanning, which wants to update the
  // BOT of the last card in the retained old GC alloc region, and allocation
  // threads allocating into that card at the same time.
  virtual HeapRegion* release();
};

#endif // SHARE_GC_G1_G1ALLOCREGION_HPP