// Path: blob/master/src/hotspot/share/code/dependencyContext.cpp
// (40931 views)
/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

// Perf counters tracking nmethodBucket allocation, deallocation, and the
// number of entries that went stale (deferred to the purge list).
PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
// Global list of buckets whose deletion was deferred because a GC cleaning
// phase was in progress; reclaimed in purge_dependency_contexts().
nmethodBucket* volatile DependencyContext::_purge_list = NULL;
// Non-zero while the GC is cleaning dependency contexts; set/cleared by
// cleaning_start()/cleaning_end(). Read by release() to decide whether
// immediate deletion of a bucket is safe.
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
// Monotonic counter used to stamp each cleaning phase with a fresh epoch.
uint64_t DependencyContext::_cleaning_epoch_monotonic = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

// Create the perf counters above. Only done when UsePerfData is enabled;
// otherwise the counter pointers stay NULL and all updates are guarded by
// `if (UsePerfData)` at the use sites.
void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.  Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}

//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent. Caller must hold CodeCache_lock.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  // Reuse an existing bucket for this nmethod if one is present.
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  // No bucket yet: push a new head. Insertion is serialized by the
  // CodeCache_lock, but the CAS loop is still required because a concurrent
  // cleanup (dependencies_not_unloading) may unlink the head at the same time.
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

// Dispose of an unlinked bucket. When no GC cleaning phase is in progress
// (_cleaning_epoch == 0) the bucket is deleted immediately; otherwise it is
// pushed with a CAS (competing with other releasing threads) onto the global
// purge list, to be deleted later by purge_dependency_contexts().
void DependencyContext::release(nmethodBucket* b) {
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and, optionally, remove
// the bucket completely when the count goes to 0.  This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies_not_unloading();
  nmethodBucket* last = NULL;
  for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (last == NULL) {
          // If there was not a head that was not unloading, we can set a new
          // head without a CAS, because we know there is no contending cleanup.
          set_dependencies(b->next_not_unloading());
        } else {
          // Only supports a single inserting thread (protected by CodeCache_lock)
          // for now. Therefore, the next pointer only competes with another cleanup
          // operation. That interaction does not need a CAS.
          last->set_next(b->next_not_unloading());
        }
        release(b);
      }
      return;
    }
    last = b;
  }
}

//
// Reclaim all unused buckets.
// Deletes every bucket on the deferred purge list and resets the list.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != NULL;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = NULL;
}

//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries on the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
  // NOTE: the loop body looks empty, but dependencies_not_unloading() and
  // next_not_unloading() unlink is_unloading entries (and release() them onto
  // the purge list) as a side effect of the traversal.
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context: detach the whole list, mark every
// live, not-yet-marked dependent nmethod for deoptimization, and release all
// buckets. Returns the number of nmethods marked.
int DependencyContext::remove_all_dependents() {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
}

#ifndef PRODUCT
// Debug-only dump of the dependent-nmethod list to tty.
void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

// Returns true if nm has a bucket in this context (regardless of its count).
bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
#ifdef ASSERT
      int count = b->count();
      assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
      return true;
    }
  }
  return false;
}

// Atomically decrement the dependency count; returns the new value.
int nmethodBucket::decrement() {
  return Atomic::sub(&_count, 1);
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned.  GC threads claim cleanup tasks by performing
// a CAS on this value. Returns true only for the one thread that wins the claim
// for the current cleaning epoch.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next: head changed between the two
      // loads, so head_next may be stale; retry from the new head.
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later. Must run inside a safepoint.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling of cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently.  Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next: _next changed between the
      // two loads, so next_next may be stale; retry.
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}