// Path: src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/sampling/sampleList.hpp"
#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
#include "jfr/recorder/jfrEventSetting.inline.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"

// Timestamp of when the gc last processed the set of sampled objects.
// Atomic access to prevent word tearing on 32-bit platforms.
static volatile int64_t _last_sweep;

// Condition variable to communicate that some sampled objects have been cleared by the gc
// and can therefore be removed from the sample priority queue.
static bool volatile _dead_samples = false;

// The OopStorage instance is used to hold weak references to sampled objects.
// It is constructed and registered during VM initialization. This is a singleton
// that persists independently of the state of the ObjectSampler.
static OopStorage* _oop_storage = NULL;

OopStorage* ObjectSampler::oop_storage() { return _oop_storage; }

// Callback invoked by the GC after an iteration over the oop storage
// that may have cleared dead referents. num_dead is the number of entries
// already NULL or cleared by the iteration.
void ObjectSampler::oop_storage_gc_notification(size_t num_dead) {
  if (num_dead != 0) {
    // The ObjectSampler instance may have already been cleaned or a new
    // instance was created concurrently. This allows for a small race where cleaning
    // could be done again.
    Atomic::store(&_dead_samples, true);
    Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
  }
}

// One-time VM-initialization hook: creates the weak OopStorage for sampled
// objects and registers the GC dead-entry notification callback above.
bool ObjectSampler::create_oop_storage() {
  _oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples", mtTracing);
  assert(_oop_storage != NULL, "invariant");
  _oop_storage->register_num_dead_callback(&oop_storage_gc_notification);
  return true;
}

// Singleton instance; created/destroyed only at a safepoint (see create()/destroy()).
static ObjectSampler* _instance = NULL;

static ObjectSampler& instance() {
  assert(_instance != NULL, "invariant");
  return *_instance;
}

// size is the fixed capacity of both the sample list and the priority queue.
ObjectSampler::ObjectSampler(size_t size) :
  _priority_queue(new SamplePriorityQueue(size)),
  _list(new SampleList(size)),
  _total_allocated(0),
  _threshold(0),
  _size(size) {
  // Reset the GC-communication state so a stale notification from a previous
  // sampler incarnation does not trigger an immediate scavenge.
  Atomic::store(&_dead_samples, false);
  Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
}

ObjectSampler::~ObjectSampler() {
  delete _priority_queue;
  _priority_queue = NULL;
  delete _list;
  _list = NULL;
}

// Creates the singleton. Must run at a safepoint so no thread can be inside
// sample()/add() while the instance is being (re)installed.
bool ObjectSampler::create(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(_oop_storage != NULL, "should be already created");
  // Discard checkpoint state belonging to any previous sampler instance.
  ObjectSampleCheckpoint::clear();
  assert(_instance == NULL, "invariant");
  _instance = new ObjectSampler(size);
  return _instance != NULL;
}

bool ObjectSampler::is_created() {
  return _instance != NULL;
}

ObjectSampler* ObjectSampler::sampler() {
  assert(is_created(), "invariant");
  return _instance;
}

// Destroys the singleton. Safepoint-only, for the same reason as create().
void ObjectSampler::destroy() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  if (_instance != NULL) {
    ObjectSampler* const sampler = _instance;
    // Unpublish before delete so no one can observe a dangling _instance.
    _instance = NULL;
    delete sampler;
  }
}

// Simple spin lock word guarding the sampler's data structures:
// 0 == unlocked, 1 == locked. Writers in sample() use JfrTryLock (non-blocking);
// acquire() below spins and is used by consumers that must succeed.
static volatile int _lock = 0;

ObjectSampler* ObjectSampler::acquire() {
  // Spin until the lock word transitions 0 -> 1.
  while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
  return _instance;
}

void ObjectSampler::release() {
  // Full fence so all updates made under the lock are visible before unlock.
  OrderAccess::fence();
  _lock = 0;
}

// Resolves the JFR trace id for the allocating thread.
// Returns 0 (meaning "do not sample") when the thread has no Java mirror yet
// or is excluded from JFR. As a side effect, materializes the thread blob
// needed later by add().
static traceid get_thread_id(JavaThread* thread) {
  assert(thread != NULL, "invariant");
  if (thread->threadObj() == NULL) {
    return 0;
  }
  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  if (tl->is_excluded()) {
    return 0;
  }
  if (!tl->has_thread_blob()) {
    JfrCheckpointManager::create_thread_blob(thread);
  }
  assert(tl->has_thread_blob(), "invariant");
  return tl->thread_id();
}

// RAII helper: if the OldObjectSample event is configured to include stack
// traces, records one for the leak profiler on construction and clears the
// thread-local cached trace on destruction (i.e. when sample() returns).
class RecordStackTrace {
 private:
  JavaThread* _jt;
  bool _enabled;
 public:
  RecordStackTrace(JavaThread* jt) : _jt(jt),
    _enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
    if (_enabled) {
      JfrStackTraceRepository::record_for_leak_profiler(jt);
    }
  }
  ~RecordStackTrace() {
    if (_enabled) {
      _jt->jfr_thread_local()->clear_cached_stack_trace();
    }
  }
};

// Entry point called on the allocation path. Best-effort: the sample is
// dropped if the thread is not sampleable or the lock is contended, so the
// allocating thread never blocks here.
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
  assert(thread != NULL, "invariant");
  assert(is_created(), "invariant");
  const traceid thread_id = get_thread_id(thread);
  if (thread_id == 0) {
    return;
  }
  RecordStackTrace rst(thread);
  // try enter critical section
  JfrTryLock tryLock(&_lock);
  if (!tryLock.acquired()) {
    log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
    return;
  }
  instance().add(obj, allocated, thread_id, thread);
}

// Inserts a sample for obj into the priority queue. Caller holds _lock
// (via JfrTryLock in sample()).
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
  assert(obj != NULL, "invariant");
  assert(thread_id != 0, "invariant");
  assert(thread != NULL, "invariant");
  assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");

  if (Atomic::load(&_dead_samples)) {
    // There's a small race where a GC scan might reset this to true, potentially
    // causing a back-to-back scavenge.
    Atomic::store(&_dead_samples, false);
    scavenge();
  }

  // span: allocation volume since the currently queued samples' total —
  // the priority used to decide whether this sample displaces another.
  _total_allocated += allocated;
  const size_t span = _total_allocated - _priority_queue->total();
  ObjectSample* sample;
  if ((size_t)_priority_queue->count() == _size) {
    assert(_list->count() == _size, "invariant");
    const ObjectSample* peek = _priority_queue->peek();
    if (peek->span() > span) {
      // quick reject, will not fit
      return;
    }
    // Queue is full: evict the lowest-priority sample and reuse its slot.
    sample = _list->reuse(_priority_queue->pop());
  } else {
    sample = _list->get();
  }

  assert(sample != NULL, "invariant");
  sample->set_thread_id(thread_id);

  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  sample->set_thread(tl->thread_blob());

  // Stack trace was cached by RecordStackTrace in sample(); hash 0 means
  // no trace was recorded.
  const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
  if (stacktrace_hash != 0) {
    sample->set_stack_trace_id(tl->cached_stack_trace_id());
    sample->set_stack_trace_hash(stacktrace_hash);
  }

  sample->set_span(allocated);
  sample->set_object(cast_to_oop(obj));
  sample->set_allocated(allocated);
  sample->set_allocation_time(JfrTicks::now());
  sample->set_heap_used_at_last_gc(Universe::heap()->used_at_last_gc());
  _priority_queue->push(sample);
}

// Removes samples whose weak referents the GC has cleared. Caller holds _lock.
// next is read before removal because remove_dead() unlinks current.
void ObjectSampler::scavenge() {
  ObjectSample* current = _list->last();
  while (current != NULL) {
    ObjectSample* next = current->next();
    if (current->is_dead()) {
      remove_dead(current);
    }
    current = next;
  }
}

// Unlinks a dead sample from queue and list, transferring its span to the
// previous sample so the cumulative priority accounting stays consistent.
void ObjectSampler::remove_dead(ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  assert(sample->is_dead(), "invariant");
  sample->release();

  ObjectSample* const previous = sample->prev();
  // push span onto previous
  if (previous != NULL) {
    // Re-push so the queue re-sorts previous under its enlarged span.
    _priority_queue->remove(previous);
    previous->add_span(sample->span());
    _priority_queue->push(previous);
  }
  _priority_queue->remove(sample);
  _list->release(sample);
}

ObjectSample* ObjectSampler::last() const {
  return _list->last();
}

const ObjectSample* ObjectSampler::first() const {
  return _list->first();
}

const ObjectSample* ObjectSampler::last_resolved() const {
  return _list->last_resolved();
}

void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
  _list->set_last_resolved(sample);
}

int ObjectSampler::item_count() const {
  return _priority_queue->count();
}

const ObjectSample* ObjectSampler::item_at(int index) const {
  return _priority_queue->item_at(index);
}

// Non-const overload delegates to the const one to avoid duplicating logic.
ObjectSample* ObjectSampler::item_at(int index) {
  return const_cast<ObjectSample*>(
    const_cast<const ObjectSampler*>(this)->item_at(index)
  );
}

int64_t ObjectSampler::last_sweep() {
  return Atomic::load(&_last_sweep);
}