GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp

/*
 * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "runtime/vmThread.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                             OopsInGenClosure* cl,
                                                             CardTableRS* ct,
                                                             int n_threads) {
  assert(n_threads > 0, "Error: expected n_threads > 0");
  assert((n_threads == 1 && ParallelGCThreads == 0) ||
         n_threads <= (int)ParallelGCThreads,
         "# worker threads != # requested!");
  assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
  assert(UseDynamicNumberOfGCThreads ||
         !FLAG_IS_DEFAULT(ParallelGCThreads) ||
         n_threads == (int)ParallelGCThreads,
         "# worker threads != # requested!");
  // Make sure the LNC array is valid for the space.
  jbyte** lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

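  // The strides of this space form a pool of parallel subtasks: each of the
  // n_threads workers claims strides via is_task_claimed() below until all
  // n_strides strides are taken. Stride s covers every n_strides-th chunk of
  // ParGCCardsPerStrideChunk cards (the chunks whose index is congruent to
  // s modulo n_strides), so consecutive chunks belong to different strides
  // and the threads interleave across the card table.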
  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides, cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}

void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We go from lower to higher addresses here; going from higher to lower
  // instead wouldn't help much, because of the strided parallelism pattern
  // used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte* start_card = byte_for(used.start());
  jbyte* end_card = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }
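  // For example, with n_strides == 4 and a used region whose first chunk
  // belongs to stride 2 (start_chunk_stride_num == 2): stride 3 starts
  // (3 - 2) == 1 chunk past start_card, while stride 1 takes the second
  // branch and skips (4 - 2 + 1) == 3 chunks, landing on the first chunk
  // at or above used.start() whose index is congruent to 1 mod 4.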

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary());
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);


    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We do not call the non_clean_card_iterate_serial() version because
    // we want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}


// If you want a talkative process_chunk_boundaries,
// then #define NOISY(x) x
#ifdef NOISY
#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow"
#else
#define NOISY(x)
#endif

void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t lowest_non_clean_chunk_size)
{
  // We must worry about non-array objects that cross chunk boundaries,
  // because such objects are both precisely and imprecisely marked:
  // .. if the head of such an object is dirty, the entire object
  //    needs to be scanned, under the interpretation that this
  //    was an imprecise mark
  // .. if the head of such an object is not dirty, we can assume
  //    precise marking and it's efficient to scan just the dirty
  //    cards.
  // In either case, each scanned reference must be scanned precisely
  // once so as to avoid cloning of a young referent. For efficiency,
  // our closures depend on this property and do not protect against
  // double scans.
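  //
  // For instance, suppose a plain (non-array) object starts near the end of
  // chunk k and extends into chunk k+1. If the card containing its head is
  // dirty, the thread processing chunk k scans the entire object, including
  // the tail that lies in chunk k+1; the thread processing chunk k+1 must
  // therefore stop short of that tail. The lowest_non_clean (LNC) entries
  // set below, and the min_done bound computed further down, coordinate this.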

  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;

  NOISY(tty->print_cr("===========================================================================");)
  NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")",
                      chunk_mr.start(), chunk_mr.end());)

  // First, set "our" lowest_non_clean entry, which would be
  // used by the thread scanning an adjoining left chunk with
  // a non-array object straddling the mutual boundary.
  // Find the object that spans our boundary, if one exists.
  // first_block is the block possibly straddling our left boundary.
  HeapWord* first_block = sp->block_start(chunk_mr.start());
  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
         "First chunk should always have a co-initial block");
  // Does the block straddle the chunk's left boundary, and is it
  // a non-array object?
  if (first_block < chunk_mr.start()        // first block straddles left bdry
      && sp->block_is_obj(first_block)      // first block is an object
      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
           || oop(first_block)->is_typeArray())) {
    // Find our least non-clean card, so that a left neighbour
    // does not scan an object straddling the mutual boundary
    // too far to the right, and attempt to scan a portion of
    // that object twice.
    jbyte* first_dirty_card = NULL;
    jbyte* last_card_of_first_obj =
      byte_for(first_block + sp->block_size(first_block) - 1);
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
    jbyte* last_card_to_check =
      (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                    (intptr_t) last_card_of_first_obj);
    // Note that this does not need to go beyond our last card
    // if our first object completely straddles this chunk.
    for (jbyte* cur = first_card_of_cur_chunk;
         cur <= last_card_to_check; cur++) {
      jbyte val = *cur;
      if (card_will_be_scanned(val)) {
        first_dirty_card = cur; break;
      } else {
        assert(!card_may_have_been_dirty(val), "Error");
      }
    }
    if (first_dirty_card != NULL) {
      NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk",
                          first_dirty_card);)
      assert(0 <= cur_chunk_index && cur_chunk_index < lowest_non_clean_chunk_size,
             "Bounds error.");
      assert(lowest_non_clean[cur_chunk_index] == NULL,
             "Write exactly once : value should be stable hereafter for this round");
      lowest_non_clean[cur_chunk_index] = first_dirty_card;
    } NOISY(else {
      tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
      // In the future, we could have this thread look for a non-NULL value to copy from its
      // right neighbour (up to the end of the first object).
      if (last_card_of_cur_chunk < last_card_of_first_obj) {
        tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
                      "   might be efficient to get value from right neighbour?");
      }
    })
  } else {
    // In this case we can help our neighbour by just asking them
    // to stop at our first card (even though it may not be dirty).
    NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
  }
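  // At this point lowest_non_clean[cur_chunk_index] is either the first card
  // of this chunk that will be scanned (when a non-array object straddles
  // our left boundary) or simply our first card; in both cases it tells the
  // left neighbour where to stop. It remains NULL only when a straddling
  // object's cards within this chunk are all clean.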
  NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT
                      " which corresponds to the heap address " PTR_FORMAT,
                      cur_chunk_index, lowest_non_clean[cur_chunk_index],
                      (lowest_non_clean[cur_chunk_index] != NULL)
                      ? addr_for(lowest_non_clean[cur_chunk_index])
                      : NULL);)
  NOISY(tty->print_cr("---------------------------------------------------------------------------");)

  // Next, set our own max_to_do, which will strictly/exclusively bound
  // the highest address that we will scan past the right end of our chunk.
  HeapWord* max_to_do = NULL;
  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.
    // What is our last block? We check the first block of
    // the next (right) chunk rather than strictly check our last block
    // because it's potentially more efficient to do so.
    HeapWord* const last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if ((last_block == chunk_mr.end())     // our last block does not straddle boundary
        || !sp->block_is_obj(last_block)   // last_block isn't an object
        || oop(last_block)->is_objArray()  // last_block is an array (precisely marked)
        || oop(last_block)->is_typeArray()) {
      max_to_do = chunk_mr.end();
      NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n"
                          "   max_to_do left at " PTR_FORMAT, max_to_do);)
    } else {
      assert(last_block < chunk_mr.end(), "Tautology");
      // It is a non-array object that straddles the right boundary of this chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk. Note that the last object may not start in
      // the chunk.
      jbyte* const last_obj_card = byte_for(last_block);
      const jbyte val = *last_obj_card;
      if (!card_will_be_scanned(val)) {
        assert(!card_may_have_been_dirty(val), "Error");
        // The card containing the head is not dirty. Any marks on
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end of our chunk.
        max_to_do = chunk_mr.end();
        NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n"
                            "   max_to_do left at " PTR_FORMAT,
                            max_to_do);)
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk. Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        const size_t last_block_size = sp->block_size(last_block);
        jbyte* const last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned, terminating
        // at the end of the last_block, if no earlier dirty card
        // is found.
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
               "last card of next chunk may be wrong");
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_of_last_obj; cur++) {
          const jbyte val = *cur;
          if (card_will_be_scanned(val)) {
            NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x",
                                cur, (int)val);)
            limit_card = cur; break;
          } else {
            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
          }
        }
        if (limit_card != NULL) {
          max_to_do = addr_for(limit_card);
          assert(limit_card != NULL && max_to_do != NULL, "Error");
          NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT
                              "   max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: "
                              PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
                              limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));)
        } else {
          // The following is a pessimistic value, because it's possible
          // that a dirty card on a subsequent chunk has been cleared by
          // the time we get to look at it; we'll correct for that further below,
          // using the LNC array which records the least non-clean card
          // before cards were cleared in a particular chunk.
          limit_card = last_card_of_last_obj;
          max_to_do = last_block + last_block_size;
          assert(limit_card != NULL && max_to_do != NULL, "Error");
          NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n"
                              " Setting limit_card to " PTR_FORMAT
                              " and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
                              limit_card, last_block, last_block_size, max_to_do);)
        }
        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // It is possible that a dirty card for the last object may have been
        // cleared before we had a chance to examine it. In that case, the value
        // will have been logged in the LNC for that chunk.
        // We need to examine as many chunks to the right as this object
        // covers. However, we need to bound this checking to the largest
        // entry in the LNC array: this is because the heap may expand
        // after the LNC array has been created but before we reach this point,
        // and the last block in our chunk may have been expanded to include
        // the expansion delta (and possibly subsequently allocated from, so
        // it wouldn't be sufficient to check whether that last block was
        // or was not an object at this point).
        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
                                              - lowest_non_clean_base_chunk_index;
        const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
                                           - lowest_non_clean_base_chunk_index;
        if (last_chunk_index_to_check > last_chunk_index) {
          assert(last_block + last_block_size > used.end(),
                 err_msg("Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
                         " does not exceed used.end() = " PTR_FORMAT ","
                         " yet last_chunk_index_to_check " INTPTR_FORMAT
                         " exceeds last_chunk_index " INTPTR_FORMAT,
                         last_block, last_block + last_block_size,
                         used.end(),
                         last_chunk_index_to_check, last_chunk_index));
          assert(sp->used_region().end() > used.end(),
                 err_msg("Expansion did not happen: "
                         "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
                         sp->used_region().start(), sp->used_region().end(), used.start(), used.end()));
          NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
          last_chunk_index_to_check = last_chunk_index;
        }
        for (uintptr_t lnc_index = cur_chunk_index + 1;
             lnc_index <= last_chunk_index_to_check;
             lnc_index++) {
          jbyte* lnc_card = lowest_non_clean[lnc_index];
          if (lnc_card != NULL) {
            // we can stop at the first non-NULL entry we find
            if (lnc_card <= limit_card) {
              NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT
                                  "   max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT,
                                  lnc_card, limit_card, addr_for(lnc_card), max_to_do);)
              limit_card = lnc_card;
              max_to_do = addr_for(limit_card);
              assert(limit_card != NULL && max_to_do != NULL, "Error");
            }
            // In any case, we break now
            break;
          } // else continue to look for a non-NULL entry if any
        }
        assert(limit_card != NULL && max_to_do != NULL, "Error");
      }
      assert(max_to_do != NULL, "OOPS 1 !");
    }
    assert(max_to_do != NULL, "OOPS 2!");
  } else {
    max_to_do = used.end();
    NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n"
                        "   max_to_do left at " PTR_FORMAT,
                        max_to_do);)
  }
  assert(max_to_do != NULL, "OOPS 3!");
  // Now we can set the closure we're using so it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif
  NOISY(tty->print_cr("===========================================================================\n");)
}

#undef NOISY

void
CardTableModRefBS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region. Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then it would cause "expand_and_allocate" that would
  // increase the number of chunks in the covered region. Then a second
  // thread would come and execute this, see that the size didn't match,
  // and free and allocate again. So the first thread would be using a
  // freed "_lowest_non_clean" array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
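  // (This is the classic double-checked locking idiom: a racy load_acquire
  // first, then a re-check under ParGCRareEvent_lock; the matching
  // release_store at the end of the critical section publishes the fully
  // initialized arrays before the updated collection count can be observed.)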
  int cur_collection = Universe::heap()->total_collections();
  // Updated _last_LNC_resizing_collection[i] must not be visible before
  // _lowest_non_clean and friends are visible. Therefore use acquire/release
  // to guarantee this on non-TSO architectures.
  if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    // This load_acquire is here for clarity only. The MutexLocker already fences.
    if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i], mtGC);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
          _lowest_non_clean_chunk_size[i] = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
      }
      // Make sure this gets visible only after _lowest_non_clean* was initialized
      OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
}