GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"

class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
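
// Illustrative note (not part of the upstream header): because ShenandoahLiveData is
// uint16_t, ((ShenandoahLiveData)-1) wraps to 0xFFFF, so SHENANDOAH_LIVEDATA_MAX is
// 65535 and acts as the saturation point of a per-worker, per-region counter. A minimal
// sketch of how such a saturating buffer could be used (the flush policy itself lives
// in the .inline.hpp/.cpp files, and "region_index"/"live_words" are hypothetical names):
//
//   ShenandoahLiveData* cache = get_liveness_cache(worker_id);   // declared below
//   size_t new_live = (size_t)cache[region_index] + live_words;
//   if (new_live >= SHENANDOAH_LIVEDATA_MAX) {
//     // flush to the region with an atomic update, then reset the cache slot
//   } else {
//     cache[region_index] = (ShenandoahLiveData)new_live;
//   }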

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or NULL if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
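
// Example (illustrative sketch, not from the upstream sources): next() hands out each
// region at most once across all callers and returns NULL when exhausted, so the same
// drain loop can be run from one thread or from several workers concurrently:
//
//   ShenandoahRegionIterator it(ShenandoahHeap::heap());
//   for (ShenandoahHeapRegion* r = it.next(); r != NULL; r = it.next()) {
//     // process r
//   }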

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};
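
// Example (illustrative sketch, not from the upstream sources): a closure applied to
// every region. Returning true from is_thread_safe() signals that it may be invoked
// from multiple workers via parallel_heap_region_iterate(); the closure name below is
// hypothetical:
//
//   class CountRegionsClosure : public ShenandoahHeapRegionClosure {
//   public:
//     void heap_region_do(ShenandoahHeapRegion* r) { /* inspect r */ }
//     bool is_thread_safe()                        { return true;   }
//   };
//
//   CountRegionsClosure cl;
//   ShenandoahHeap::heap()->heap_region_iterate(&cl);   // declared further down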

typedef ShenandoahLock    ShenandoahHeapLock;
typedef ShenandoahLocker  ShenandoahHeapLocker;
typedef Stack<oop, mtGC>  ShenandoahScanObjectStack;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;
  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }
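
  // Example (illustrative sketch, not from the upstream sources): the heap lock is
  // normally taken RAII-style through the ShenandoahHeapLocker typedef above, assuming
  // ShenandoahLocker accepts the lock pointer in its constructor:
  //
  //   {
  //     ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock());
  //     // mutate heap-global data structures guarded by the lock
  //   }   // released when the locker goes out of scope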

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();

  const char* name() const { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize();
  void post_initialize();
  void initialize_mode();
  void initialize_heuristics();

  void initialize_serviceability();

  void print_on(outputStream* st) const;
  void print_extended_on(outputStream *st) const;
  void print_tracing_info() const;
  void print_heap_regions_on(outputStream* st) const;

  void stop();

  void prepare_for_verify();
  void verify(VerifyOption vo);

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const {
    return true;
  }

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;
  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile size_t _used;
  volatile size_t _committed;
  volatile size_t _bytes_allocated_since_gc_start;
  shenandoah_padding(1);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity() const;
  size_t max_capacity() const;
  size_t soft_max_capacity() const;
  size_t initial_capacity() const;
  size_t capacity() const;
  size_t used() const;
  size_t committed() const;

  void set_soft_max_capacity(size_t v);

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkGang* _workers;
  ShenandoahWorkGang* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkGang* workers() const;
  WorkGang* safepoint_workers();

  void gc_threads_do(ThreadClosure* tcl) const;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of collector state that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS = 4,
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING       = 1 << MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
    WEAK_ROOTS    = 1 << WEAK_ROOTS_BITPOS,
  };
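
  // Worked example (illustrative, not from the upstream sources): gc_state() below
  // returns these bits OR-ed together. Per the EVACUATION_BITPOS comment, EVACUATION
  // is set together with HAS_FORWARDED, so during concurrent evacuation:
  //
  //   char state = ShenandoahHeap::heap()->gc_state();
  //   // state == (HAS_FORWARDED | EVACUATION) == 0b00101 == 5
  //   bool needs_lrb = (state & (HAS_FORWARDED | EVACUATION)) != 0;   // true
  //
  // and a state of zero (STABLE) means no barriers are required.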

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;
  ShenandoahSharedFlag   _concurrent_strong_root_in_progress;

  void set_gc_state_all_threads(char state);
  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state() const;
  static address gc_state_addr();

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;
  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;

private:
  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads can not suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED,

    // GC has not been cancelled and must not be cancelled. At least
    // one worker thread checks for pending safepoint and may suspend
    // if a safepoint is pending.
    NOT_CANCELLED
  };

  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
  bool try_cancel_gc();

public:
  static address cancelled_gc_addr();

  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);
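
  // Example (illustrative sketch, not from the upstream sources): a long-running GC
  // worker loop would be expected to poll for cancellation, yielding to a pending
  // safepoint while that is still allowed ("have_work"/"do_some_work" are hypothetical):
  //
  //   while (have_work()) {
  //     if (ShenandoahHeap::heap()->check_cancelled_gc_and_yield()) {
  //       return;   // GC was cancelled, bail out early
  //     }
  //     do_some_work();
  //   }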

public:
  // Elastic heap support
  void entry_uncommit(double shrink_before, size_t shrink_until);
  void op_uncommit(double shrink_before, size_t shrink_until);

private:
  // GC support
  // Reset bitmap, prepare regions for new GC cycle
  void prepare_gc();
  void prepare_regions_and_collection_set(bool concurrent);
  // Evacuation
  void prepare_evacuation(bool concurrent);
  void evacuate_collection_set(bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references(bool concurrent);
  void update_heap_references(bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);
  void rebuild_free_set(bool concurrent);

  void rendezvous_threads();
  void recycle_trash();
public:
  void notify_gc_progress()    { _progress_last_gc.set();   }
  void notify_gc_no_progress() { _progress_last_gc.unset(); }

//
// Mark support
private:
  ShenandoahControlThread*   _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahHeuristics*      _heuristics;
  ShenandoahFreeSet*         _free_set;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahPhaseTimings*    _phase_timings;

  ShenandoahControlThread*   control_thread() { return _control_thread; }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode*            mode()              const { return _gc_mode;           }
  ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
  ShenandoahPacer*           pacer()             const { return _pacer;             }

  ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }

  ShenandoahVerifier*        verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool*                  _memory_pool;
  GCMemoryManager              _stw_memory_manager;
  GCMemoryManager              _cycle_memory_manager;
  ConcurrentGCTimer*           _gc_timer;
  SoftRefPolicy                _soft_ref_policy;

  // For exporting to SA
  int                          _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support;    }
  GCMemoryManager* cycle_memory_manager()           { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager()             { return &_stw_memory_manager;   }
  SoftRefPolicy* soft_ref_policy()                  { return &_soft_ref_policy;      }

  GrowableArray<GCMemoryManager*> memory_managers();
  GrowableArray<MemoryPool*> memory_pools();
  MemoryUsage memory_usage();
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

// ---------- Reference processing
//
private:
  ShenandoahReferenceProcessor* const _ref_processor;

public:
  ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload     _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(bool full_gc);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

// ---------- Generic interface hooks
// Minor things that super-interface expects us to implement to play nice with
// the rest of runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
  bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect(GCCause::Cause cause);
  void do_full_collection(bool clear_all_soft_refs);

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl);
  // Parallel heap iteration support
  virtual ParallelObjectIterator* parallel_object_iterator(uint workers);

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj);

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin();
  void safepoint_synchronize_end();

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);
  void flush_nmethod(nmethod* nm);
  void verify_nmethod(nmethod* nm) {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  bool supports_object_pinning() const { return true; }

  oop pin_object(JavaThread* thread, oop obj);
  void unpin_object(JavaThread* thread, oop obj);
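
  // Example (illustrative sketch, not from the upstream sources): pin/unpin are the
  // per-object hooks commonly reached from JNI critical sections; a paired use, with
  // "jt"/"obj" as hypothetical locals, would look like:
  //
  //   oop pinned = heap->pin_object(jt, obj);   // region holding obj stays in place
  //   // ... native code may access the object's memory without it being moved ...
  //   heap->unpin_object(jt, obj);              // region may be evacuated again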

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Concurrent Stack Processing support
//
public:
  bool uses_stack_watermark_barrier() const { return true; }

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what);
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype);

  void notify_mutator_alloc_words(size_t words, bool waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);
  size_t tlab_capacity(Thread *thr) const;
  size_t unsafe_max_tlab_alloc(Thread *thread) const;
  size_t max_tlab_size() const;
  size_t tlab_used(Thread* ignored) const;

  void ensure_parsability(bool retire_labs);

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion  _bitmap_region;
  MemRegion  _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  void reset_mark_bitmap();

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);

// ---------- Helper functions
//
public:
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void update_with_forwarded(T* p);

  static inline oop cas_oop(oop n, narrowOop* addr, oop c);
  static inline oop cas_oop(oop n, oop* addr, oop c);
  static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c);

  void trash_humongous_region_at(ShenandoahHeapRegion *r);

private:
  void trash_cset_regions();

// ---------- Testing helpers functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP