GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/gc/z/zDirector.cpp
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zStat.hpp"
#include "logging/log.hpp"

constexpr double one_in_1000 = 3.290527;
constexpr double sample_interval = 1.0 / ZStatAllocRate::sample_hz;
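// For reference: 3.290527 is the two-sided standard-normal quantile for a
// probability of roughly 1 in 1000 (a ~3.3 sigma margin), and sample_interval
// is the time in seconds between two allocation rate samples.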

ZDirector::ZDirector(ZDriver* driver) :
    _driver(driver),
    _metronome(ZStatAllocRate::sample_hz) {
  set_name("ZDirector");
  create_and_start();
}

static void sample_allocation_rate() {
  // Sample allocation rate. This is needed by rule_allocation_rate()
  // below to estimate the time we have until we run out of memory.
  const double bytes_per_second = ZStatAllocRate::sample_and_reset();

  log_debug(gc, alloc)("Allocation Rate: %.1fMB/s, Predicted: %.1fMB/s, Avg: %.1f(+/-%.1f)MB/s",
                       bytes_per_second / M,
                       ZStatAllocRate::predict() / M,
                       ZStatAllocRate::avg() / M,
                       ZStatAllocRate::sd() / M);
}

static ZDriverRequest rule_allocation_stall() {
  // Perform GC if we've observed at least one allocation stall since
  // the last GC started.
  if (!ZHeap::heap()->has_alloc_stalled()) {
    return GCCause::_no_gc;
  }

  log_debug(gc, director)("Rule: Allocation Stall Observed");

  return GCCause::_z_allocation_stall;
}

static ZDriverRequest rule_warmup() {
  if (ZStatCycle::is_warm()) {
    // Rule disabled
    return GCCause::_no_gc;
  }

  // Perform GC if heap usage passes 10/20/30% and no other GC has been
  // performed yet. This allows us to get some early samples of the GC
  // duration, which is needed by the other rules.
  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
  const size_t used = ZHeap::heap()->used();
  const double used_threshold_percent = (ZStatCycle::nwarmup_cycles() + 1) * 0.1;
  const size_t used_threshold = soft_max_capacity * used_threshold_percent;
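  // Illustrative example (numbers not from the source): with a 10GB soft
  // max capacity the warmup thresholds are roughly 1GB, 2GB and 3GB of
  // used heap for the first, second and third warmup cycle.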

  log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
                          used_threshold_percent * 100, used / M, used_threshold / M);

  if (used < used_threshold) {
    return GCCause::_no_gc;
  }

  return GCCause::_z_warmup;
}

static ZDriverRequest rule_timer() {
  if (ZCollectionInterval <= 0) {
    // Rule disabled
    return GCCause::_no_gc;
  }

  // Perform GC if timer has expired.
  const double time_since_last_gc = ZStatCycle::time_since_last();
  const double time_until_gc = ZCollectionInterval - time_since_last_gc;

  log_debug(gc, director)("Rule: Timer, Interval: %.3fs, TimeUntilGC: %.3fs",
                          ZCollectionInterval, time_until_gc);

  if (time_until_gc > 0) {
    return GCCause::_no_gc;
  }

  return GCCause::_z_timer;
}

static double estimated_gc_workers(double serial_gc_time, double parallelizable_gc_time, double time_until_deadline) {
  const double parallelizable_time_until_deadline = MAX2(time_until_deadline - serial_gc_time, 0.001);
  return parallelizable_gc_time / parallelizable_time_until_deadline;
}

static uint discrete_gc_workers(double gc_workers) {
  return clamp<uint>(ceil(gc_workers), 1, ConcGCThreads);
}
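// Illustrative example (numbers not from the source): with serial_gc_time =
// 0.1s, parallelizable_gc_time = 2.0s and a 0.6s deadline, the parallel part
// must finish in 0.5s, so estimated_gc_workers() returns 4.0 and
// discrete_gc_workers(4.0) yields 4 (assuming ConcGCThreads >= 4).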

static double select_gc_workers(double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) {
  // Use all workers until we're warm
  if (!ZStatCycle::is_warm()) {
    const double not_warm_gc_workers = ConcGCThreads;
    log_debug(gc, director)("Select GC Workers (Not Warm), GCWorkers: %.3f", not_warm_gc_workers);
    return not_warm_gc_workers;
  }

  // Calculate number of GC workers needed to avoid a long GC cycle and to avoid OOM.
  const double avoid_long_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, 10 /* seconds */);
  const double avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom);

  const double gc_workers = MAX2(avoid_long_gc_workers, avoid_oom_gc_workers);
  const uint actual_gc_workers = discrete_gc_workers(gc_workers);
  const uint last_gc_workers = ZStatCycle::last_active_workers();

  // More than 15% deviation from the average is considered unsteady
  if (alloc_rate_sd_percent >= 0.15) {
    const double half_gc_workers = ConcGCThreads / 2.0;
    const double unsteady_gc_workers = MAX3<double>(gc_workers, last_gc_workers, half_gc_workers);
    log_debug(gc, director)("Select GC Workers (Unsteady), "
                            "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, HalfGCWorkers: %.3f, GCWorkers: %.3f",
                            avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, half_gc_workers, unsteady_gc_workers);
    return unsteady_gc_workers;
  }

  if (actual_gc_workers < last_gc_workers) {
    // Before decreasing the number of GC workers compared to the previous GC cycle, check if the
    // next GC cycle will need to increase it again. If so, use the same number of GC workers
    // that will be needed in the next cycle.
    const double gc_duration_delta = (parallelizable_gc_time / actual_gc_workers) - (parallelizable_gc_time / last_gc_workers);
    const double additional_time_for_allocations = ZStatCycle::time_since_last() - gc_duration_delta - sample_interval;
    const double next_time_until_oom = time_until_oom + additional_time_for_allocations;
    const double next_avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, next_time_until_oom);

    // Add 0.5 to increase friction and avoid lowering too eagerly
    const double next_gc_workers = next_avoid_oom_gc_workers + 0.5;
    const double try_lowering_gc_workers = clamp<double>(next_gc_workers, actual_gc_workers, last_gc_workers);

    log_debug(gc, director)("Select GC Workers (Try Lowering), "
                            "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, NextAvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f",
                            avoid_long_gc_workers, avoid_oom_gc_workers, next_avoid_oom_gc_workers, (double)last_gc_workers, try_lowering_gc_workers);
    return try_lowering_gc_workers;
  }

  log_debug(gc, director)("Select GC Workers (Normal), "
                          "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f",
                          avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, gc_workers);
  return gc_workers;
}

static ZDriverRequest rule_allocation_rate_dynamic() {
  if (!ZStatCycle::is_time_trustable()) {
    // Rule disabled
    return GCCause::_no_gc;
  }

  // Calculate amount of free memory available. Note that we take the
  // relocation headroom into account to avoid in-place relocation.
  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
  const size_t used = ZHeap::heap()->used();
  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());

  // Calculate time until OOM given the max allocation rate and the amount
  // of free memory. The allocation rate is a moving average and we multiply
  // that with an allocation spike tolerance factor to guard against unforeseen
  // phase changes in the allocation rate. We then add ~3.3 sigma to account for
  // the allocation rate variance, which means the probability is 1 in 1000
  // that a sample is outside of the confidence interval.
  const double alloc_rate_predict = ZStatAllocRate::predict();
  const double alloc_rate_avg = ZStatAllocRate::avg();
  const double alloc_rate_sd = ZStatAllocRate::sd();
  const double alloc_rate_sd_percent = alloc_rate_sd / (alloc_rate_avg + 1.0);
  const double alloc_rate = (MAX2(alloc_rate_predict, alloc_rate_avg) * ZAllocationSpikeTolerance) + (alloc_rate_sd * one_in_1000) + 1.0;
  const double time_until_oom = (free / alloc_rate) / (1.0 + alloc_rate_sd_percent);
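  // Illustrative example (numbers not from the source): with 2048MB free,
  // an average/predicted rate of 500MB/s, a spike tolerance of 2 and a
  // standard deviation of 50MB/s, alloc_rate is about 1165MB/s and
  // time_until_oom about (2048 / 1165) / 1.1, i.e. roughly 1.6 seconds.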

  // Calculate max serial/parallel times of a GC cycle. The times are
  // moving averages; we add ~3.3 sigma to account for the variance.
  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);

  // Calculate number of GC workers needed to avoid OOM.
  const double gc_workers = select_gc_workers(serial_gc_time, parallelizable_gc_time, alloc_rate_sd_percent, time_until_oom);

  // Convert to a discrete number of GC workers within limits.
  const uint actual_gc_workers = discrete_gc_workers(gc_workers);

  // Calculate GC duration given number of GC workers needed.
  const double actual_gc_duration = serial_gc_time + (parallelizable_gc_time / actual_gc_workers);
  const uint last_gc_workers = ZStatCycle::last_active_workers();

  // Calculate time until GC given the time until OOM and GC duration.
  // We also subtract the sample interval, so that we don't overshoot the
  // target time and end up starting the GC too late in the next interval.
  const double time_until_gc = time_until_oom - actual_gc_duration - sample_interval;

  log_debug(gc, director)("Rule: Allocation Rate (Dynamic GC Workers), "
                          "MaxAllocRate: %.1fMB/s (+/-%.1f%%), Free: " SIZE_FORMAT "MB, GCCPUTime: %.3f, "
                          "GCDuration: %.3fs, TimeUntilOOM: %.3fs, TimeUntilGC: %.3fs, GCWorkers: %u -> %u",
                          alloc_rate / M,
                          alloc_rate_sd_percent * 100,
                          free / M,
                          serial_gc_time + parallelizable_gc_time,
                          serial_gc_time + (parallelizable_gc_time / actual_gc_workers),
                          time_until_oom,
                          time_until_gc,
                          last_gc_workers,
                          actual_gc_workers);
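  // A GC is requested below either when the projected runway is gone
  // (time_until_gc <= 0) or when more GC workers than in the previous
  // cycle would be needed to keep up.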

  if (actual_gc_workers <= last_gc_workers && time_until_gc > 0) {
    return ZDriverRequest(GCCause::_no_gc, actual_gc_workers);
  }

  return ZDriverRequest(GCCause::_z_allocation_rate, actual_gc_workers);
}

static ZDriverRequest rule_allocation_rate_static() {
  if (!ZStatCycle::is_time_trustable()) {
    // Rule disabled
    return GCCause::_no_gc;
  }

  // Perform GC if the estimated max allocation rate indicates that we
  // will run out of memory. The estimated max allocation rate is based
  // on the moving average of the sampled allocation rate plus a safety
  // margin based on variations in the allocation rate and unforeseen
  // allocation spikes.

  // Calculate amount of free memory available. Note that we take the
  // relocation headroom into account to avoid in-place relocation.
  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
  const size_t used = ZHeap::heap()->used();
  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());

  // Calculate time until OOM given the max allocation rate and the amount
  // of free memory. The allocation rate is a moving average and we multiply
  // that with an allocation spike tolerance factor to guard against unforeseen
  // phase changes in the allocation rate. We then add ~3.3 sigma to account for
  // the allocation rate variance, which means the probability is 1 in 1000
  // that a sample is outside of the confidence interval.
  const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::sd() * one_in_1000);
  const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero

  // Calculate max serial/parallel times of a GC cycle. The times are
  // moving averages; we add ~3.3 sigma to account for the variance.
  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);

  // Calculate GC duration given number of GC workers needed.
  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads);

  // Calculate time until GC given the time until OOM and max duration of GC.
  // We also deduct the sample interval, so that we don't overshoot the target
  // time and end up starting the GC too late in the next interval.
  const double time_until_gc = time_until_oom - gc_duration - sample_interval;
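  // In other words, a GC is started once the projected runway shrinks to
  // about one full GC duration plus one sample interval. Illustrative
  // example (numbers not from the source): with time_until_oom = 4.0s and
  // gc_duration = 1.5s there is still headroom, so no GC is requested yet.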

  log_debug(gc, director)("Rule: Allocation Rate (Static GC Workers), MaxAllocRate: %.1fMB/s, Free: " SIZE_FORMAT "MB, GCDuration: %.3fs, TimeUntilGC: %.3fs",
                          max_alloc_rate / M, free / M, gc_duration, time_until_gc);

  if (time_until_gc > 0) {
    return GCCause::_no_gc;
  }

  return GCCause::_z_allocation_rate;
}

static ZDriverRequest rule_allocation_rate() {
  if (UseDynamicNumberOfGCThreads) {
    return rule_allocation_rate_dynamic();
  } else {
    return rule_allocation_rate_static();
  }
}

static ZDriverRequest rule_high_usage() {
  // Perform GC if the amount of free memory is 5% or less. This is a preventive
  // measure in the case where the application has a very low allocation rate,
  // such that the allocation rate rule doesn't trigger, but the amount of free
  // memory is still slowly but surely heading towards zero. In this situation,
  // we start a GC cycle to avoid a potential allocation stall later.

  // Calculate amount of free memory available. Note that we take the
  // relocation headroom into account to avoid in-place relocation.
  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
  const size_t used = ZHeap::heap()->used();
  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());
  const double free_percent = percent_of(free, soft_max_capacity);
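  // Illustrative example (numbers not from the source): with a 10GB soft
  // max capacity, the rule requests a GC once the free memory left after
  // the relocation headroom drops to roughly 512MB (5%).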

  log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)",
                          free / M, free_percent);

  if (free_percent > 5.0) {
    return GCCause::_no_gc;
  }

  return GCCause::_z_high_usage;
}

static ZDriverRequest rule_proactive() {
  if (!ZProactive || !ZStatCycle::is_warm()) {
    // Rule disabled
    return GCCause::_no_gc;
  }

  // Perform GC if the impact of doing so, in terms of application throughput
  // reduction, is considered acceptable. This rule allows us to keep the heap
  // size down and allow reference processing to happen even when we have a lot
  // of free space on the heap.

  // Only consider doing a proactive GC if the heap usage has grown by at least
  // 10% of the max capacity since the previous GC, or more than 5 minutes has
  // passed since the previous GC. This helps avoid superfluous GCs when running
  // applications with very low allocation rate.
  const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end();
  const size_t used_increase_threshold = ZHeap::heap()->soft_max_capacity() * 0.10; // 10%
  const size_t used_threshold = used_after_last_gc + used_increase_threshold;
  const size_t used = ZHeap::heap()->used();
  const double time_since_last_gc = ZStatCycle::time_since_last();
  const double time_since_last_gc_threshold = 5 * 60; // 5 minutes
  if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) {
    // Don't even consider doing a proactive GC
    log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3fs",
                            (used_threshold - used) / M,
                            time_since_last_gc_threshold - time_since_last_gc);
    return GCCause::_no_gc;
  }

  const double assumed_throughput_drop_during_gc = 0.50; // 50%
  const double acceptable_throughput_drop = 0.01; // 1%
  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);
  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads);
  const double acceptable_gc_interval = gc_duration * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0);
  const double time_until_gc = acceptable_gc_interval - time_since_last_gc;
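  // With a 50% assumed throughput drop and a 1% acceptable drop, the factor
  // is (0.50 / 0.01) - 1 = 49, so e.g. a 1s GC duration spaces proactive GCs
  // at least ~49s apart, bounding the throughput impact to about 1%.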

  log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3fs, TimeSinceLastGC: %.3fs, TimeUntilGC: %.3fs",
                          acceptable_gc_interval, time_since_last_gc, time_until_gc);

  if (time_until_gc > 0) {
    return GCCause::_no_gc;
  }

  return GCCause::_z_proactive;
}

static ZDriverRequest make_gc_decision() {
  // List of rules
  using ZDirectorRule = ZDriverRequest (*)();
  const ZDirectorRule rules[] = {
    rule_allocation_stall,
    rule_warmup,
    rule_timer,
    rule_allocation_rate,
    rule_high_usage,
    rule_proactive,
  };
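  // The array order is the priority order: the first rule below that returns
  // a cause other than GCCause::_no_gc decides the GC request, so e.g. an
  // observed allocation stall takes precedence over a proactive GC.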

  // Execute rules
  for (size_t i = 0; i < ARRAY_SIZE(rules); i++) {
    const ZDriverRequest request = rules[i]();
    if (request.cause() != GCCause::_no_gc) {
      return request;
    }
  }

  return GCCause::_no_gc;
}

void ZDirector::run_service() {
  // Main loop
  while (_metronome.wait_for_tick()) {
    sample_allocation_rate();
    if (!_driver->is_busy()) {
      const ZDriverRequest request = make_gc_decision();
      if (request.cause() != GCCause::_no_gc) {
        _driver->collect(request);
      }
    }
  }
}

void ZDirector::stop_service() {
  _metronome.stop();
}