GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP

#include "gc_interface/gcCause.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "memory/barrierSet.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"

// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class MetaspaceSummary;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class nmethod;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

 public:
  GCMessage() {}
};

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before() {
    log_heap(true);
  }
  void log_heap_after() {
    log_heap(false);
  }
};

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//   ShenandoahHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
  bool _defer_initial_card_mark;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  uint _n_par_threads;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new tlab. All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size);
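
  // The fast path for the pair above bumps the current thread's TLAB top and only
  // falls back to allocate_from_tlab_slow() when the TLAB cannot satisfy the request
  // (the slow path may retire the TLAB and request a new one). A minimal illustrative
  // sketch of that split, not part of this interface; the real code lives in
  // collectedHeap.inline.hpp:
  //
  //   HeapWord* obj = thread->tlab().allocate(size);          // bump-pointer, no locking
  //   if (obj != NULL) {
  //     return obj;                                           // common case
  //   }
  //   return allocate_from_tlab_slow(klass, thread, size);    // may retire/refill the TLAB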

  // Allocate an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful allocation
  // is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                             HeapWord* objPtr);

  inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj, int size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap,
    ShenandoahHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  bool is_in_place(Metadata** p) {
    return !Universe::heap()->is_in(p);
  }
  bool is_in_place(oop* p) { return Universe::heap()->is_in(p); }
  bool is_in_place(narrowOop* p) {
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    return Universe::heap()->is_in((const void*)o);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  // references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved". But this may be too
  // liberal to perform useful checking. Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head. The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector. Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
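
  // Membership in the closed subset is intended for assertion checking; a hedged
  // illustration of how a caller might use it (not a member of this class):
  //
  //   assert(Universe::heap()->is_in_closed_subset(p),
  //          "pointer should refer to allocated storage in the heap");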

#ifdef ASSERT
  // Returns true if "p" is in the part of the
  // heap being collected.
  virtual bool is_in_partial_collection(const void *p) = 0;
#endif

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Number of threads currently working on GC tasks.
  uint n_par_threads() { return _n_par_threads; }

  // May be overridden to set additional parallelism.
  virtual void set_par_threads(uint t) { _n_par_threads = t; };

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
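
  // The "cover" relationship above means obj_allocate()/array_allocate() first obtain
  // raw storage (from the current TLAB or, failing that, via mem_allocate()) and then
  // install the object header. A simplified sketch of that flow, assuming the helpers
  // declared earlier in this class; the real code is in collectedHeap.inline.hpp:
  //
  //   HeapWord* mem = common_mem_allocate_init(klass, size, CHECK_NULL); // TLAB or mem_allocate()
  //   post_allocation_setup_obj(klass, mem, size);                       // mark word + klass install
  //   return (oop)mem;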

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
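
  // A typical use of these fillers is making part of the heap parsable by overwriting
  // unused space with dummy objects, for example when a TLAB is retired. An illustrative
  // sketch only; the local names below (tlab_top, tlab_end) are assumptions, not fields
  // of this class:
  //
  //   HeapWord* tlab_top = ...;   // first unused word of the retired TLAB
  //   HeapWord* tlab_end = ...;   // one past the last usable word
  //   if (pointer_delta(tlab_end, tlab_top) >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_object(tlab_top, tlab_end); // heap stays walkable
  //   }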

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end". Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
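
  // When supports_inline_contig_alloc() answers "true", generated code can allocate by
  // bumping the word addressed by top_addr() against the limit addressed by end_addr().
  // A simplified sketch of that protocol (illustrative only; it ignores object
  // initialization, retries and slow-path fallback):
  //
  //   HeapWord* volatile* top_p = (HeapWord* volatile*)heap->top_addr();
  //   HeapWord* old_top = *top_p;
  //   HeapWord* new_top = old_top + size_in_words;
  //   if (new_top <= *heap->end_addr() &&
  //       Atomic::cmpxchg_ptr(new_top, top_p, old_top) == old_top) {
  //     // success: [old_top, new_top) is the newly allocated space
  //   }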

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Section on thread-local allocation buffers (TLABs).
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const = 0;

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere. The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address doesn't need a store
  // barrier. Returns "true" if it doesn't need an initializing
  // store barrier; answers "false" if it does.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);

  // Does this heap support heap inspection (+PrintClassHistogram)?
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // Returns the barrier set for this heap.
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started).
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started).
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2.
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap.
  virtual CollectorPolicy* collector_policy() const = 0;

  void oop_iterate_no_header(OopClosure* cl);

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
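
  // Taken together, block_start(), block_size() and block_is_obj() let a caller walk a
  // region of the heap block by block. An illustrative sketch, assuming the caller
  // supplies the region bounds (region_start, region_end):
  //
  //   HeapWord* cur = heap->block_start(region_start);
  //   while (cur < region_end) {
  //     if (heap->block_is_obj(cur)) {
  //       oop obj = oop(cur);            // a Java object starts here
  //       // ... process obj ...
  //     }
  //     cur += heap->block_size(cur);    // advance to the next block
  //   }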

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc.
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const {
    st->print_cr("Heap:");
    print_extended_on(st);
    st->cr();

    _barrier_set->print_on(st);
  }

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread).
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  // Override with specific mechanism for each specialized heap type.
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);

  void trace_heap_before_gc(GCTracer* gc_tracer);
  void trace_heap_after_gc(GCTracer* gc_tracer);

  // Heap verification.
  virtual void verify(bool silent, VerifyOption option) = 0;

  // Shut down all GC workers and other GC related threads.
  virtual void shutdown();

  // Accumulate additional statistics from GCLABs.
  virtual void accumulate_statistics_all_gclabs();

  // Support for object pinning. This is used by JNI Get*Critical()
  // and Release*Critical() family of functions. If supported, the GC
  // must guarantee that pinned objects never move.
  virtual bool supports_object_pinning() const;
  virtual oop pin_object(JavaThread* thread, oop obj);
  virtual void unpin_object(JavaThread* thread, oop obj);

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot. Return true if it's time to cause a
  // promotion failure. The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters. Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // This is a convenience method that is used in cases where
  // the actual number of GC worker threads is not pertinent but
  // only whether there are more than 0. Use of this method helps
  // reduce the occurrence of ParallelGCThreads to uses where the
  // actual number may be germane.
  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }

  // Copy the current allocation context statistics for the specified contexts.
  // For each context in contexts, set the corresponding entries in the totals
  // and accuracy arrays to the current values held by the statistics. Each
  // array should be of length len.
  // Returns true if there are more stats available.
  virtual bool copy_allocation_context_stats(const jint* contexts,
                                             jlong* totals,
                                             jbyte* accuracy,
                                             jint len) {
    return false;
  }

  /////////////// Unit tests ///////////////

  NOT_PRODUCT(static void test_is_in();)
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
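
// Typical usage (an illustrative sketch, not code from this file): a stack-allocated
// GCCauseSetter records the cause for the duration of a collection performed at a
// safepoint and restores the previous cause when it goes out of scope:
//
//   {
//     GCCauseSetter gccs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   } // destructor restores the previous cause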

#endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP