Path: tools/testing/selftests/kvm/include/x86/pmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2023, Tencent, Inc.
 */
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H

#include <stdbool.h>
#include <stdint.h>

#include <linux/bits.h>

#define KVM_PMU_EVENT_FILTER_MAX_EVENTS		300

/*
 * Encode an eventsel+umask pair into event-select MSR format.  Note, this is
 * technically AMD's format, as Intel's format only supports 8 bits for the
 * event selector, i.e. doesn't use bits 24:16 for the selector.  But, OR-ing
 * in '0' is a nop and won't clobber the CMASK.
 */
#define RAW_EVENT(eventsel, umask)	((((eventsel) & 0xf00UL) << 24) |	\
					 ((eventsel) & 0xff)		|	\
					 (((umask) & 0xff) << 8))

/*
 * These are technically Intel's definitions, but except for CMASK (see above),
 * AMD's layout is compatible with Intel's.
 */
#define ARCH_PERFMON_EVENTSEL_EVENT		GENMASK_ULL(7, 0)
#define ARCH_PERFMON_EVENTSEL_UMASK		GENMASK_ULL(15, 8)
#define ARCH_PERFMON_EVENTSEL_USR		BIT_ULL(16)
#define ARCH_PERFMON_EVENTSEL_OS		BIT_ULL(17)
#define ARCH_PERFMON_EVENTSEL_EDGE		BIT_ULL(18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	BIT_ULL(19)
#define ARCH_PERFMON_EVENTSEL_INT		BIT_ULL(20)
#define ARCH_PERFMON_EVENTSEL_ANY		BIT_ULL(21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		BIT_ULL(22)
#define ARCH_PERFMON_EVENTSEL_INV		BIT_ULL(23)
#define ARCH_PERFMON_EVENTSEL_CMASK		GENMASK_ULL(31, 24)

/* RDPMC control flags, Intel only. */
#define INTEL_RDPMC_METRICS			BIT_ULL(29)
#define INTEL_RDPMC_FIXED			BIT_ULL(30)
#define INTEL_RDPMC_FAST			BIT_ULL(31)

/* Fixed PMC controls, Intel only. */
#define FIXED_PMC_GLOBAL_CTRL_ENABLE(_idx)	BIT_ULL((32 + (_idx)))

#define FIXED_PMC_KERNEL			BIT_ULL(0)
#define FIXED_PMC_USER				BIT_ULL(1)
#define FIXED_PMC_ANYTHREAD			BIT_ULL(2)
#define FIXED_PMC_ENABLE_PMI			BIT_ULL(3)
#define FIXED_PMC_NR_BITS			4
#define FIXED_PMC_CTRL(_idx, _val)	((_val) << ((_idx) * FIXED_PMC_NR_BITS))

#define PMU_CAP_FW_WRITES			BIT_ULL(13)
#define PMU_CAP_LBR_FMT				0x3f

#define INTEL_ARCH_CPU_CYCLES			RAW_EVENT(0x3c, 0x00)
#define INTEL_ARCH_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
#define INTEL_ARCH_REFERENCE_CYCLES		RAW_EVENT(0x3c, 0x01)
#define INTEL_ARCH_LLC_REFERENCES		RAW_EVENT(0x2e, 0x4f)
#define INTEL_ARCH_LLC_MISSES			RAW_EVENT(0x2e, 0x41)
#define INTEL_ARCH_BRANCHES_RETIRED		RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED	RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS		RAW_EVENT(0xa4, 0x01)
#define INTEL_ARCH_TOPDOWN_BE_BOUND		RAW_EVENT(0xa4, 0x02)
#define INTEL_ARCH_TOPDOWN_BAD_SPEC		RAW_EVENT(0x73, 0x00)
#define INTEL_ARCH_TOPDOWN_FE_BOUND		RAW_EVENT(0x9c, 0x01)
#define INTEL_ARCH_TOPDOWN_RETIRING		RAW_EVENT(0xc2, 0x02)
#define INTEL_ARCH_LBR_INSERTS			RAW_EVENT(0xe4, 0x01)

#define AMD_ZEN_CORE_CYCLES			RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
#define AMD_ZEN_BRANCHES_RETIRED		RAW_EVENT(0xc2, 0x00)
#define AMD_ZEN_BRANCHES_MISPREDICTED		RAW_EVENT(0xc3, 0x00)
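
/*
 * Encoding sketch (illustrative only, not defined upstream): a hypothetical
 * 12-bit event selector 0x1c7 with umask 0x3 lands selector bits 11:8 in
 * MSR bits 35:32, the umask in bits 15:8, and selector bits 7:0 in bits 7:0.
 */
_Static_assert(RAW_EVENT(0x1c7, 0x3) == ((0x1ULL << 32) | (0x3 << 8) | 0xc7),
	       "RAW_EVENT() encoding sketch");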

/*
 * Note!  The order and thus the index of the architectural events matters as
 * support for each event is enumerated via CPUID using the index of the event.
 */
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES_INDEX,
	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX,
	INTEL_ARCH_REFERENCE_CYCLES_INDEX,
	INTEL_ARCH_LLC_REFERENCES_INDEX,
	INTEL_ARCH_LLC_MISSES_INDEX,
	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
	INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
	INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
	INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
	INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
	INTEL_ARCH_LBR_INSERTS_INDEX,
	NR_INTEL_ARCH_EVENTS,
};

enum amd_pmu_zen_events {
	AMD_ZEN_CORE_CYCLES_INDEX,
	AMD_ZEN_INSTRUCTIONS_INDEX,
	AMD_ZEN_BRANCHES_INDEX,
	AMD_ZEN_BRANCH_MISSES_INDEX,
	NR_AMD_ZEN_EVENTS,
};

extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];

enum pmu_errata {
	INSTRUCTIONS_RETIRED_OVERCOUNT,
	BRANCHES_RETIRED_OVERCOUNT,
};
extern uint64_t pmu_errata_mask;

void kvm_init_pmu_errata(void);

static inline bool this_pmu_has_errata(enum pmu_errata errata)
{
	return pmu_errata_mask & BIT_ULL(errata);
}

#endif /* SELFTEST_KVM_PMU_H */
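
/*
 * Usage sketch (illustrative, not an established test): program Intel fixed
 * counter 0 (instructions retired) to count in kernel and user mode, enable
 * it via global control, then relax the final assertion if the PMU is known
 * to overcount retired instructions.  wrmsr(), the MSR_CORE_PERF_* defines,
 * and the GUEST_ASSERT*() macros come from elsewhere in the selftests.
 *
 *	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL,
 *	      FIXED_PMC_CTRL(0, FIXED_PMC_KERNEL | FIXED_PMC_USER));
 *	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(0));
 *
 *	<run the measured workload, read the counter into "count">
 *
 *	if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT))
 *		GUEST_ASSERT(count >= expected);
 *	else
 *		GUEST_ASSERT_EQ(count, expected);
 */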