GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/code/dependencyContext.cpp
/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
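
// Note on the statics below (a reader's summary, inferred from their uses in
// this file): _purge_list collects nmethodBucket entries unlinked while a GC
// cleaning pass was in progress (see release()); purge_dependency_contexts()
// frees them once that is safe. _cleaning_epoch is non-zero only between
// cleaning_start() and cleaning_end(), and _cleaning_epoch_monotonic supplies
// the ever-increasing epoch values that claim_cleanup() compares against.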
PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
nmethodBucket* volatile DependencyContext::_purge_list = NULL;
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
uint64_t DependencyContext::_cleaning_epoch_monotonic = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}

//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
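  // No bucket for this nmethod yet: push a new one onto the list head.
  // Inserts are serialized by CodeCache_lock (asserted above), but the head
  // can be unlinked concurrently by cleanup in dependencies_not_unloading(),
  // so the push below still has to retry its CAS until it wins.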
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}
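
// Release an nmethodBucket. While a GC cleaning pass is in progress
// (_cleaning_epoch != 0) concurrent readers may still observe the bucket, so
// it is parked on the global purge list and freed later by
// purge_dependency_contexts(); otherwise it is deleted immediately under the
// CodeCache_lock.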
void DependencyContext::release(nmethodBucket* b) {
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and, optionally, remove
// the bucket completely when the count goes to 0. This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies_not_unloading();
  nmethodBucket* last = NULL;
  for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (last == NULL) {
          // If there was not a head that was not unloading, we can set a new
          // head without a CAS, because we know there is no contending cleanup.
          set_dependencies(b->next_not_unloading());
        } else {
          // Only supports a single inserting thread (protected by CodeCache_lock)
          // for now. Therefore, the next pointer only competes with another cleanup
          // operation. That interaction does not need a CAS.
          last->set_next(b->next_not_unloading());
        }
        release(b);
      }
      return;
    }
    last = b;
  }
}

//
// Reclaim all unused buckets.
//
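// Buckets parked on _purge_list by release() are freed here. Per the comment
// in clean_unloading_dependents(), this happens during
// ClassLoaderDataGraph::purge(), once it is safe to delete them.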
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != NULL;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = NULL;
}

//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries on the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
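    // The walk itself is the cleanup: next_not_unloading() unlinks any
    // is_unloading() entries it encounters and hands them to release(),
    // so the loop body has nothing else to do.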
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
int DependencyContext::remove_all_dependents() {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
}

#ifndef PRODUCT
void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
#ifdef ASSERT
      int count = b->count();
      assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
      return true;
    }
  }
  return false;
}

int nmethodBucket::decrement() {
  return Atomic::sub(&_count, 1);
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}
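
// Note: at most one thread can win the CAS above for a given context within a
// cleaning epoch; a losing racer, or a repeat caller in the same epoch, sees
// last_cleanup >= cleaning_epoch and backs off.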

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
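    // head_next is only a valid successor if head is still the list head when
    // its next pointer is read; the fence and re-load below catch a concurrent
    // unlink of head and retry instead of installing a stale successor.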
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling of cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}
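
// The epoch protocol in summary: cleaning_start() publishes a fresh non-zero
// _cleaning_epoch at a safepoint, release() observes a non-zero epoch and
// defers deletion to the purge list, and cleaning_end() stores zero so that
// later releases expunge immediately again.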

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
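    // Same unstable-load guard as in dependencies_not_unloading(): next_next
    // is only used if _next still points at next after the fence.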
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}