/* Source: PojavLauncherTeam/openj9 — runtime/gc_vlhgc/AllocationContextBalanced.hpp */
/*******************************************************************************
 * Copyright (c) 1991, 2021 IBM Corp. and others
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] http://openjdk.java.net/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
 *******************************************************************************/
/**
 * @file
 * @ingroup GC_Modron_Base
 */
#if !defined(ALLOCATIONCONTEXTBALANCED_HPP_)
#define ALLOCATIONCONTEXTBALANCED_HPP_

#include "j9.h"
#include "j9cfg.h"
#include "j9protos.h"
#include "modronopt.h"

#include "AllocationContextTarok.hpp"
#include "LightweightNonReentrantLock.hpp"
#include "RegionListTarok.hpp"

class MM_AllocateDescription;
class MM_AllocationContextMultiTenant;
class MM_EnvironmentBase;
class MM_MemorySubSpaceTarok;

class MM_AllocationContextBalanced : public MM_AllocationContextTarok
48
{
49
/* Data members / Types */
50
public:
51
protected:
52
private:
53
MM_LightweightNonReentrantLock _contextLock; /**< protects updates to the context's region cache */
54
MM_LightweightNonReentrantLock _freeListLock; /**< protects updates to the context's free list (managed distinctly from the rest of the context to support region stealing */
55
MM_MemorySubSpaceTarok *_subspace; /**< the subspace from which this context allocates and refreshes itself */
56
MM_HeapRegionDescriptorVLHGC *_allocationRegion; /**< The region currently satisfying allocations within this context - also the most recently replenished/stolen region in this context */
57
MM_RegionListTarok _nonFullRegions; /**< Regions which failed to satisfy a large object allocation but which are not yet full. The _allocationRegion is added to this list when it fails to satisfy an object allocation but this list will quickly be consumed by TLH allocation requests which can soak up all remaining space. This list is consulted after _allocationRegion but before replenishment or theft. */
58
MM_RegionListTarok _discardRegionList; /**< The list of MPAOL regions currently privately owned by this context but either too full or too fragmented to be used to satisfy allocations. These regions must be flushed back to the subspace before a collection */
59
MM_RegionListTarok _flushedRegions; /**< The list of MPAOL regions which have been flushed from active use, for a GC, and have unknown stats (at any point after the GC, however, these regions could all be migrated to _ownedRegions) */
60
MM_RegionListTarok _freeRegions; /**< The list regions which are owned by this context but currently marked as FREE */
61
MM_RegionListTarok _idleMPRegions; /**< The list regions which are owned by this context, currently marked as ADDRESS_ORDERED_IDLE, but contain no objects (this also implies that they can migrate to other contexts on this node, for free, since the receiver isn't using them) */
62
UDATA _freeMemorySize; /**< The amount of free memory currently managed by the context in the _ownedRegions list only (note that dark matter and small holes are not considered free memory). This value is always accurate (that is, there is no time when it becomes out of sync with the actual amount of free memory managed by the context). */
63
UDATA _regionCount[MM_HeapRegionDescriptor::LAST_REGION_TYPE]; /**< Count of regions that are owned by this AC (accurate only for TGC purposes) */
64
UDATA _threadCount;/**< Count of mutator threads that allocate from this AC (accurate only for TGC purposes) */
65
UDATA _numaNode; /**< the NUMA node this context is associated with, or 0 if none */
66
MM_AllocationContextBalanced *_nextSibling; /**< Instances of the receiver are built into a circular linked-list. This points to the next adjacent downstream neighbour in this list (may be pointing to this if there is only one context in the node) */
67
MM_AllocationContextBalanced *_cachedReplenishPoint; /**< The sibling context which most recently replenished the receiver */
68
MM_AllocationContextBalanced *_stealingCousin; /**< A context in the "next" node which the receiver will use for spilling memory requests across NUMA nodes. Points back at the receiver if this is non-NUMA */
69
MM_AllocationContextBalanced *_nextToSteal; /**< A pointer to the next context we will try to steal from (we steal in a round-robin to try to distribute the heap's asymmetry). Points back at the receiver if this is non-NUMA */
70
MM_HeapRegionManager *_heapRegionManager; /**< A cached pointer to the HeapRegionManager */
71
UDATA *_freeProcessorNodes; /**< The array listing all the NUMA node numbers which account for the nodes with processors but no memory plus an empty slot for each context to use (element 0 is used by this context) - this is used when setting affinity */
72
UDATA _freeProcessorNodeCount; /**< The length, in elements, of the _freeProcessorNodes array (always at least 1 after startup) */
73
74
/* Methods */
75
public:
76
static MM_AllocationContextBalanced *newInstance(MM_EnvironmentBase *env, MM_MemorySubSpaceTarok *subspace, UDATA numaNode, UDATA allocationContextNumber);
77
virtual void flush(MM_EnvironmentBase *env);
78
virtual void flushForShutdown(MM_EnvironmentBase *env);
79
80
/**
81
* Ideally, this would only be understood by sub-classes which know about TLH allocation but we will use runtime assertions to ensure this is safe, for now
82
*/
83
virtual void *allocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_ObjectAllocationInterface *objectAllocationInterface, bool shouldCollectOnFailure);
84
virtual void *allocateObject(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, bool shouldCollectOnFailure);
85
virtual void *allocateArrayletLeaf(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, bool shouldCollectOnFailure);
86
87
/**
88
* Acquire a new region to be placed in the active set of regions from which to allocate.
89
* @param env GC thread.
90
* @return A region descriptor that has been acquired and put into the active to be consumed from rotation.
91
* @note This will immediately move the acquired region from _ownedRegions to _flushedRegions
92
*/
93
virtual MM_HeapRegionDescriptorVLHGC *collectorAcquireRegion(MM_EnvironmentBase *env);
94
95
/**
96
* @See MM_AllocationContext::allocate
97
*/
98
virtual void *allocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType);
99
100
/**
101
* Perform an allocate of the given allocationType and return it. Note that the receiver can assume that either the context is locked or the calling thread has exclusive.
102
*
103
* @param[in] env The calling thread
104
* @param[in] objectAllocationInterface The allocation interface through which the original allocation call was initiated (only used by TLH allocations, can be NULL in other cases)
105
* @param[in] allocateDescription The description of the requested allocation
106
* @param[in] allocationType The type of allocation to perform
107
*
108
* @return The result of the allocation (NULL on failure)
109
*/
110
virtual void *lockedAllocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType);
111
112
/**
113
* Called when the given region has been deemed empty, but is not a FREE region. The receiver is responsible for changing the region type and ensuring that it is appropriately stored.
114
* @param env[in] The calling thread (typically the main GC thread)
115
* @param region[in] The region to recycle
116
*/
117
virtual void recycleRegion(MM_EnvironmentVLHGC *env, MM_HeapRegionDescriptorVLHGC *region);
118
119
/**
120
* Called during tear down of a subspace to discard any regions which comprise the subspace.
121
* The region should be FREE (not IDLE!) on completion of this call.
122
* @param env[in] The calling thread (typically the main GC thread)
123
* @param region[in] The region to tear down
124
*/
125
virtual void tearDownRegion(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region);
126
127
/**
128
* Called during both expansion and during region recycling (if the region was owned by this context, it will come through this path when
129
* recycled). This method can only accept FREE regions.
130
* @param env[in] The thread which expanded the region or recycled it
131
* @param region[in] The region which should be added to our free list
132
*/
133
virtual void addRegionToFreeList(MM_EnvironmentBase *env, MM_HeapRegionDescriptorVLHGC *region);
134
135
/**
136
* Reset region count, used by TGC
137
* @param regionType type of the region that we reset the count for
138
*/
139
virtual void resetRegionCount(MM_HeapRegionDescriptor::RegionType regionType) { _regionCount[regionType] = 0; }
140
/**
141
* Increment region count, used by TGC
142
* @param regionType type of the region that we update the count for
143
*/
144
virtual void incRegionCount(MM_HeapRegionDescriptor::RegionType regionType) { _regionCount[regionType] += 1; }
145
146
/**
147
* Return the region count associated with this AC, used by TGC
148
* @param regionType type of the region that we get the count for
149
* @return region count
150
*/
151
virtual UDATA getRegionCount(MM_HeapRegionDescriptor::RegionType regionType) { return _regionCount[regionType]; }
152
/**
153
* Reset mutator count, used by TGC
154
*/
155
virtual void resetThreadCount() { _threadCount = 0; }
156
157
/**
158
* Increment mutator count, used by TGC
159
*/
160
virtual void incThreadCount() { _threadCount += 1; }
161
162
/**
163
* Return the mutator count associated with this AC, used by TGC
164
*
165
* @return mutator count
166
*/
167
virtual UDATA getThreadCount() { return _threadCount; }
168
169
/**
170
* Return the NUMA node with which this AC is associated, or 0 if none
171
*
172
* @return associated NUMA node
173
*/
174
virtual UDATA getNumaNode() { return _numaNode; }
175
176
/**
177
* Return the amount of free memory currently managed by the context. This value is always accurate (that is, there is no time when it becomes
178
* out of sync with the actual amount of free memory managed by the context).
179
*
180
* @return The free memory managed by the receiver, in bytes.
181
*/
182
virtual UDATA getFreeMemorySize();
183
184
/**
185
* Return the number of free (empty) regions currently managed by the context. This value is always accurate (that is, there is no time when it becomes
186
* out of sync with the actual number of regions managed by the context).
187
*
188
* @return The count of free regions managed by the receiver.
189
*/
190
virtual UDATA getFreeRegionCount();
191
192
/**
193
* Sets the downstream sibling in the circularly linked-list of contexts on a given node.
194
* @param sibling[in] The next downstream neighbour in the list (might be this if there is only one context on the node)
195
*/
196
void setNextSibling(MM_AllocationContextBalanced *sibling);
197
198
/**
199
* @return The next region in the circular linked list of contexts on this node (could be this if there is only one context on the node)
200
*/
201
MM_AllocationContextBalanced *getNextSibling() { return _nextSibling; }
202
203
/**
204
* Sets the _stealingCousin instance variable (can only be called once!).
205
* @param cousin[in] The context onto which the receiver will spill failed allocates within its node (must not be NULL)
206
*/
207
void setStealingCousin(MM_AllocationContextBalanced *cousin);
208
209
/**
210
* @return The first context onto which the receiver will spill allocation failures within its own node
211
*/
212
MM_AllocationContextBalanced *getStealingCousin() { return _stealingCousin; }
213
214
/**
215
* Called to reset the largest free entry in all the MemoryPoolBumpPointer instances in the regions managed by the receiver.
216
*/
217
virtual void resetLargestFreeEntry();
218
219
/**
220
* @return The largest free entry out of all the pools managed by the receiver.
221
*/
222
virtual UDATA getLargestFreeEntry();
223
224
/**
225
* Called to move the given region directly from the receiver and into the target context instance. Note that this method can only
226
* be called in one thread on any instance since it directly manipulates lists across contexts. The caller is responsible for
227
* ensuring that these threading semantics are preserved.
228
* @param region[in] The region to migrate from the receiver
229
* @param newOwner[in] The context to which the given region is being migrated
230
*/
231
virtual void migrateRegionToAllocationContext(MM_HeapRegionDescriptorVLHGC *region, MM_AllocationContextTarok *newOwner);
232
233
/**
234
* Called by migrateRegionToAllocationContext on the target context receiving the migrated region.
235
* The receiver will insert the migrated context in its list.
236
*
237
* @param region[in] The region to accept
238
*/
239
virtual void acceptMigratingRegion(MM_HeapRegionDescriptorVLHGC *region);
240
241
/**
242
* This helper merely forwards the resetHeapStatistics call on to all the memory pools owned by the receiver.
243
* @param globalCollect[in] True if this was a global collect (blindly passed through)
244
*/
245
virtual void resetHeapStatistics(bool globalCollect);
246
247
/**
248
* This helper merely forwards the mergeHeapStats call on to all the memory pools owned by the receiver.
249
* @param heapStats[in/out] The stats structure to receive the merged data (blindly passed through)
250
* @param includeMemoryType[in] The memory space type to use in the merge (blindly passed through)
251
*/
252
virtual void mergeHeapStats(MM_HeapStats *heapStats, UDATA includeMemoryType);
253
254
/**
255
* Used by TGC to get the count of regions owned by this context. Differentiates between regions node-local to the context and node-foreign.
256
* @param localCount[out] The number of regions owned by the receiver which are bound to the node in which the receiver logically operates
257
* @param foreignCount[out] The number of regions owned by the receiver which are NOT bound to the node in which the receiver logically operates
258
*/
259
virtual void getRegionCount(UDATA *localCount, UDATA *foreignCount);
260
261
/**
262
* Remove the specified region from the flushed regions list.
263
*
264
* @param region[in] The region to remove from the list
265
*/
266
virtual void removeRegionFromFlushedList(MM_HeapRegionDescriptorVLHGC *region);
267
268
/**
269
* @See MM_AllocationContextTarok::setNumaAffinityForThread
270
*/
271
virtual bool setNumaAffinityForThread(MM_EnvironmentBase *env);
272
273
274
protected:
275
virtual void tearDown(MM_EnvironmentBase *env);
276
bool initialize(MM_EnvironmentBase *env);
277
MM_AllocationContextBalanced(MM_MemorySubSpaceTarok *subspace, UDATA numaNode, UDATA allocationContextNumber)
278
: MM_AllocationContextTarok(allocationContextNumber, MM_AllocationContextTarok::BALANCED)
279
, _subspace(subspace)
280
, _allocationRegion(NULL)
281
, _nonFullRegions()
282
, _discardRegionList()
283
, _flushedRegions()
284
, _freeRegions()
285
, _idleMPRegions()
286
, _freeMemorySize(0)
287
, _threadCount(0)
288
, _numaNode(numaNode)
289
, _nextSibling(NULL)
290
, _cachedReplenishPoint(NULL)
291
, _stealingCousin(NULL)
292
, _nextToSteal(NULL)
293
, _heapRegionManager(NULL)
294
, _freeProcessorNodes(NULL)
295
, _freeProcessorNodeCount(0)
296
{
297
_typeId = __FUNCTION__;
298
}
299
300
virtual void *lockedReplenishAndAllocate(MM_EnvironmentBase *env, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, MM_MemorySubSpace::AllocationType allocationType);
301
virtual MM_HeapRegionDescriptorVLHGC * selectRegionForContraction(MM_EnvironmentBase *env);
302
303
private:
304
305
/**
306
* Locks the "common" _contextLock (as opposed to the _freeListLock).
307
*/
308
void lockCommon();
309
/**
310
* Locks the "common" _contextLock (as opposed to the _freeListLock).
311
*/
312
void unlockCommon();
313
314
/**
315
* Tries to acquire an MPAOL region from the receiver (either from an existing, but idle, ADDRESS_ORDERED or by converting a free region).
316
* If the receiver does not have a valid candidate it will check its siblings and cousins for one.
317
* The region that this method returns will have been removed from the receiver's management and its owning context is set to requestingContext.
318
*
319
* @note Caller must hold this context's @ref _contextLock
320
*
321
* @param env[in] The thread attempting the allocation
322
* @param subspace[in] The subSpace to which the allocated pool must be attached
323
* @param requestingContext[in] The context requesting a region from the receiver
324
* @return The region or NULL if there were none available in the heap
325
*/
326
MM_HeapRegionDescriptorVLHGC *acquireMPRegionFromHeap(MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, MM_AllocationContextTarok *requestingContext);
327
328
/**
329
* Tries to acquire a FREE region from the receiver (either from an existing FREE region or by converting an idle region).
330
* If the receiver does not have a valid candidate it will check its siblings and cousins for one.
331
* The region that this method returns will have been removed from the receiver's management and its owning context is set to requestingContext.
332
*
333
* @note Caller must hold this context's @ref _contextLock
334
*
335
* @param env[in] The thread attempting the allocation
336
* @return The region or NULL if there were none available in the heap
337
*/
338
MM_HeapRegionDescriptorVLHGC *acquireFreeRegionFromHeap(MM_EnvironmentBase *env);
339
340
/**
341
* Returns a region descriptor of ADDRESS_ORDERED type on the node where the receiver is resident. The region may not have been found
342
* in the receiver (it may have been managed by a sibling) but it will be placed under the management of the given requestingContext
343
* before this call returns.
344
* @note The caller must own the receiver's _contextLock
345
* @param env[in] The thread requesting the region
346
* @param subSpace[in] The subSpace to which the allocated pool must be attached
347
* @param requestingContext[in] The context which is requesting the region and asking this context to search its node
348
* @return A region of type ADDRESS_ORDERED (or NULL if no ADDRESS_ORDERED regions could be found or created on the receiver's node)
349
*/
350
MM_HeapRegionDescriptorVLHGC *acquireMPRegionFromNode(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocationContextTarok *requestingContext);
351
352
/**
353
* Returns a region descriptor of FREE type on the node where the receiver is resident. The region may not have been found in the
354
* receiver (it may have been managed by a sibling) but it will not be under any context's management, when it is returned and the
355
* caller accepts responsibility for managing it.
356
* @note The caller must own the receiver's _contextLock
357
* @param env[in] The thread requesting the free region
358
* @return A region of type FREE (or NULL if no FREE regions could be found or created on the receiver's node)
359
*/
360
MM_HeapRegionDescriptorVLHGC *acquireFreeRegionFromNode(MM_EnvironmentBase *env);
361
362
/**
363
* Tries to acquire an MPAOL region from the receiver (either from an existing, but idle, ADDRESS_ORDERED or by converting a free region).
364
* The region that this method returns will have been removed from the receiver's management and its owning context is set to requestingContext.
365
*
366
* @param env[in] The thread attempting the allocation
367
* @param subSpace[in] The subSpace to which the allocated pool must be attached
368
* @param requestingContext[in] The context requesting a region from the receiver
369
* @return The region or NULL if there were none available in the receiver
370
*/
371
MM_HeapRegionDescriptorVLHGC *acquireMPRegionFromContext(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocationContextTarok *requestingContext);
372
373
/**
374
* Tries to acquire a FREE region from the receiver (either from an existing FREE region or by converting an idle region).
375
* The region that this method returns will have been removed from the receiver's management and its owning context is set to requestingContext.
376
*
377
* @param env[in] The thread attempting the allocation
378
* @return The region or NULL if there were none available in the receiver
379
*/
380
MM_HeapRegionDescriptorVLHGC *acquireFreeRegionFromContext(MM_EnvironmentBase *env);
381
382
/**
383
* Perform a TLH allocation. Note that the receiver can assume that either the context is locked or the calling thread has exclusive.
384
*
385
* @param[in] env The calling thread
386
* @param[in] allocateDescription The allocation to perform
387
* @param[in] objectAllocationInterface The interface through which the allocation request was initiated (required to initialize the TLH)
388
*
389
* @return The base pointer of the allocated TLH (or NULL, if the allocation failed)
390
*/
391
void *lockedAllocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_ObjectAllocationInterface *objectAllocationInterface);
392
/**
393
* Allocate an object. Note that the receiver can assume that either the context is locked or the calling thread has exclusive.
394
*
395
* @param[in] env The calling thread
396
* @param[in] allocateDescription The allocation to perform
397
*
398
* @return The address of the allocated object (or NULL, if the allocation failed)
399
*/
400
void *lockedAllocateObject(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription);
401
402
/**
403
* Allocate an arraylet leaf. Note that the receiver can assume that either the context is locked or the calling thread has exclusive.
404
* NOTE: The returned arraylet leaf is UNINITIALIZED MEMORY (since it can't be zeroed under lock) so the caller must zero it before it
405
* can be seen by the collector or user code.
406
*
407
* @param[in] env The calling thread
408
* @param[in] allocateDescription The allocation to perform
409
* @param[in] freeRegionForArrayletLeaf The region to use for the allocation
410
*
411
* @return The address of the leaf
412
*/
413
void *lockedAllocateArrayletLeaf(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_HeapRegionDescriptorVLHGC *freeRegionForArrayletLeaf);
414
415
/**
416
* Common implementation for flush() and flushForShutdown()
417
*/
418
void flushInternal(MM_EnvironmentBase *env);
419
420
/**
421
* Replenishes the receiver's active region list. Called by lockedReplenishAndAllocate in the cases where we aren't allocating a leaf.
422
* @note The calling env must own the receiver's _contextLock or have exclusive VM access
423
*
424
* @param env[in] A GC thread (must own either _contextLock or have exclusive access)
425
* @param payTax Flag indicating whether taxation restrictions should apply
426
* @return region descriptor that was used to replenish the active region, or NULL if no descriptor was found.
427
*/
428
MM_HeapRegionDescriptorVLHGC *internalReplenishActiveRegion(MM_EnvironmentBase *env, bool payTax);
429
430
/**
431
* A helper which increments the given count of local and foreign regions based on the location of the given region.
432
* A region is considered "local" if it is bound to the node managed by the receiver.
433
* @param region[in] A region to check
434
* @param localCount[in/out] The number of regions owned by the receiver which are bound to the node in which the receiver logically operates
435
* @param foreignCount[in/out] The number of regions owned by the receiver which are NOT bound to the node in which the receiver logically operates
436
*/
437
void accountForRegionLocation(MM_HeapRegionDescriptorVLHGC *region, UDATA *localCount, UDATA *foreignCount);
438
439
/**
440
* A helper counts the regions in a specific region list and differentiates between node-local and node-foreign regions
441
* @param localCount[out] The number of regions owned by the receiver which are bound to the node in which the receiver logically operates
442
* @param foreignCount[out] The number of regions owned by the receiver which are NOT bound to the node in which the receiver logically operates
443
*/
444
void countRegionsInList(MM_RegionListTarok *list, UDATA *localCount, UDATA *foreignCount);
445
446
/**
447
* Helper for collectorAcquireRegion
448
* @param env The current GC thread.
449
* @return A region descriptor that has been acquired and put into the active to be consumed from rotation.
450
*/
451
MM_HeapRegionDescriptorVLHGC *internalCollectorAcquireRegion(MM_EnvironmentBase *env);
452
453
};
#endif /* ALLOCATIONCONTEXTBALANCED_HPP_ */