/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <[email protected]>
 */

#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif
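/*
 * This file is an x-macro list: it is meant to be #included after the
 * consumer has redefined LOCK_EVENT() to whatever expansion it needs;
 * the definition above is only a fallback that turns each entry into an
 * enumerator.  As a rough sketch of the pattern (modelled on
 * kernel/locking/lock_events.h; treat the enum tag and the trailing
 * counter as illustrative assumptions, not a fixed interface), the
 * event enum can be generated like this:
 *
 *	enum lock_events {
 *	#include "lock_events_list.h"	// expands to LOCKEVENT_<name>,
 *		lockevent_num,		// total number of lock events
 *	};
 */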
#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 */
LOCK_EVENT(pv_hash_hops)	/* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)	/* # of vCPU kicks issued at unlock time   */
LOCK_EVENT(pv_kick_wake)	/* # of vCPU kicks for pv_latency_wake	   */
LOCK_EVENT(pv_latency_kick)	/* Average latency (ns) of vCPU kick	   */
LOCK_EVENT(pv_latency_wake)	/* Average latency (ns) of kick-to-wakeup  */
LOCK_EVENT(pv_lock_stealing)	/* # of lock stealing operations	   */
LOCK_EVENT(pv_spurious_wakeup)	/* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)	/* # of waits after queue head vCPU kick   */
LOCK_EVENT(pv_wait_early)	/* # of early vCPU waits		   */
LOCK_EVENT(pv_wait_head)	/* # of vCPU waits at the queue head	   */
LOCK_EVENT(pv_wait_node)	/* # of vCPU waits at non-head queue node  */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 */
LOCK_EVENT(lock_pending)	/* # of locking ops via pending code	     */
LOCK_EVENT(lock_slowpath)	/* # of locking ops via MCS lock queue	     */
LOCK_EVENT(lock_use_node2)	/* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3)	/* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4)	/* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node)	/* # of locking ops w/o using percpu node    */
#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * Locking events for Resilient Queued Spin Lock
 */
LOCK_EVENT(rqspinlock_lock_timeout)	/* # of locking ops that time out */

/*
 * Locking events for rwsem
 */
LOCK_EVENT(rwsem_sleep_reader)	/* # of reader sleeps			*/
LOCK_EVENT(rwsem_sleep_writer)	/* # of writer sleeps			*/
LOCK_EVENT(rwsem_wake_reader)	/* # of reader wakeups			*/
LOCK_EVENT(rwsem_wake_writer)	/* # of writer wakeups			*/
LOCK_EVENT(rwsem_opt_lock)	/* # of opt-acquired write locks	*/
LOCK_EVENT(rwsem_opt_fail)	/* # of failed optspins			*/
LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optspins		*/
LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired		*/
LOCK_EVENT(rwsem_rlock_steal)	/* # of read locks by lock stealing	*/
LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired	*/
LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions	*/
LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs		*/
LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired		*/
LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions	*/
LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs		*/

/*
 * Locking events for rtlock_slowlock()
 */
LOCK_EVENT(rtlock_slowlock)	/* # of rtlock_slowlock() calls		*/
LOCK_EVENT(rtlock_slow_acq1)	/* # of locks acquired after wait_lock	*/
LOCK_EVENT(rtlock_slow_acq2)	/* # of locks acquired in the for loop	*/
LOCK_EVENT(rtlock_slow_sleep)	/* # of sleeps				*/
LOCK_EVENT(rtlock_slow_wake)	/* # of wakeups				*/

/*
 * Locking events for rt_mutex_slowlock()
 */
LOCK_EVENT(rtmutex_slowlock)	/* # of rt_mutex_slowlock() calls	*/
LOCK_EVENT(rtmutex_slow_block)	/* # of rt_mutex_slowlock_block() calls	*/
LOCK_EVENT(rtmutex_slow_acq1)	/* # of locks acquired after wait_lock	*/
LOCK_EVENT(rtmutex_slow_acq2)	/* # of locks acquired at the end	*/
LOCK_EVENT(rtmutex_slow_acq3)	/* # of locks acquired in *block()	*/
LOCK_EVENT(rtmutex_slow_sleep)	/* # of sleeps				*/
LOCK_EVENT(rtmutex_slow_wake)	/* # of wakeups				*/
LOCK_EVENT(rtmutex_deadlock)	/* # of rt_mutex_handle_deadlock() calls */

/*
 * Locking events for lockdep
 */
LOCK_EVENT(lockdep_acquire)
LOCK_EVENT(lockdep_lock)
LOCK_EVENT(lockdep_nocheck)
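/*
 * A matching string table can be generated from the same list by
 * redefining LOCK_EVENT() before a second inclusion.  A minimal sketch
 * of that double-expansion idiom, following kernel/locking/lock_events.c
 * (treat lockevent_names and the exact initializer form as assumptions
 * rather than a fixed API):
 *
 *	#undef  LOCK_EVENT
 *	#define LOCK_EVENT(name)	[LOCKEVENT_ ## name] = #name,
 *
 *	static const char * const lockevent_names[lockevent_num] = {
 *	#include "lock_events_list.h"
 *	};
 *
 * With CONFIG_LOCK_EVENT_COUNTS enabled, these names appear as the
 * per-event count files under <debugfs>/lock_event_counts.
 */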