/* Path: arch/sh/kernel/cpu/sh4a/perf_event.c */
/*1* Performance events support for SH-4A performance counters2*3* Copyright (C) 2009, 2010 Paul Mundt4*5* This file is subject to the terms and conditions of the GNU General Public6* License. See the file "COPYING" in the main directory of this archive7* for more details.8*/9#include <linux/kernel.h>10#include <linux/init.h>11#include <linux/io.h>12#include <linux/irq.h>13#include <linux/perf_event.h>14#include <asm/processor.h>1516#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))17#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))1819#define CCBR_CIT_MASK (0x7ff << 6)20#define CCBR_DUC (1 << 3)21#define CCBR_CMDS (1 << 1)22#define CCBR_PPCE (1 << 0)2324#ifdef CONFIG_CPU_SHX325/*26* The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR27* and PMCTR locations remains tentatively constant. This change remains28* wholly undocumented, and was simply found through trial and error.29*30* Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and31* it's unclear when this ceased to be the case. 
For now we always use32* the new location (if future parts keep up with this trend then33* scanning for them at runtime also remains a viable option.)34*35* The gap in the register space also suggests that there are other36* undocumented counters, so this will need to be revisited at a later37* point in time.38*/39#define PPC_PMCAT 0xfc10024040#else41#define PPC_PMCAT 0xfc10008042#endif4344#define PMCAT_OVF3 (1 << 27)45#define PMCAT_CNN3 (1 << 26)46#define PMCAT_CLR3 (1 << 25)47#define PMCAT_OVF2 (1 << 19)48#define PMCAT_CLR2 (1 << 17)49#define PMCAT_OVF1 (1 << 11)50#define PMCAT_CNN1 (1 << 10)51#define PMCAT_CLR1 (1 << 9)52#define PMCAT_OVF0 (1 << 3)53#define PMCAT_CLR0 (1 << 1)5455static struct sh_pmu sh4a_pmu;5657/*58* Supported raw event codes:59*60* Event Code Description61* ---------- -----------62*63* 0x0000 number of elapsed cycles64* 0x0200 number of elapsed cycles in privileged mode65* 0x0280 number of elapsed cycles while SR.BL is asserted66* 0x0202 instruction execution67* 0x0203 instruction execution in parallel68* 0x0204 number of unconditional branches69* 0x0208 number of exceptions70* 0x0209 number of interrupts71* 0x0220 UTLB miss caused by instruction fetch72* 0x0222 UTLB miss caused by operand access73* 0x02a0 number of ITLB misses74* 0x0028 number of accesses to instruction memories75* 0x0029 number of accesses to instruction cache76* 0x002a instruction cache miss77* 0x022e number of access to instruction X/Y memory78* 0x0030 number of reads to operand memories79* 0x0038 number of writes to operand memories80* 0x0031 number of operand cache read accesses81* 0x0039 number of operand cache write accesses82* 0x0032 operand cache read miss83* 0x003a operand cache write miss84* 0x0236 number of reads to operand X/Y memory85* 0x023e number of writes to operand X/Y memory86* 0x0237 number of reads to operand U memory87* 0x023f number of writes to operand U memory88* 0x0337 number of U memory read buffer misses89* 0x02b4 number of wait cycles due to 
operand read access90* 0x02bc number of wait cycles due to operand write access91* 0x0033 number of wait cycles due to operand cache read miss92* 0x003b number of wait cycles due to operand cache write miss93*/9495/*96* Special reserved bits used by hardware emulators, read values will97* vary, but writes must always be 0.98*/99#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))100101static const int sh4a_general_events[] = {102[PERF_COUNT_HW_CPU_CYCLES] = 0x0000,103[PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,104[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */105[PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */106[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,107[PERF_COUNT_HW_BRANCH_MISSES] = -1,108[PERF_COUNT_HW_BUS_CYCLES] = -1,109};110111#define C(x) PERF_COUNT_HW_CACHE_##x112113static const int sh4a_cache_events114[PERF_COUNT_HW_CACHE_MAX]115[PERF_COUNT_HW_CACHE_OP_MAX]116[PERF_COUNT_HW_CACHE_RESULT_MAX] =117{118[ C(L1D) ] = {119[ C(OP_READ) ] = {120[ C(RESULT_ACCESS) ] = 0x0031,121[ C(RESULT_MISS) ] = 0x0032,122},123[ C(OP_WRITE) ] = {124[ C(RESULT_ACCESS) ] = 0x0039,125[ C(RESULT_MISS) ] = 0x003a,126},127[ C(OP_PREFETCH) ] = {128[ C(RESULT_ACCESS) ] = 0,129[ C(RESULT_MISS) ] = 0,130},131},132133[ C(L1I) ] = {134[ C(OP_READ) ] = {135[ C(RESULT_ACCESS) ] = 0x0029,136[ C(RESULT_MISS) ] = 0x002a,137},138[ C(OP_WRITE) ] = {139[ C(RESULT_ACCESS) ] = -1,140[ C(RESULT_MISS) ] = -1,141},142[ C(OP_PREFETCH) ] = {143[ C(RESULT_ACCESS) ] = 0,144[ C(RESULT_MISS) ] = 0,145},146},147148[ C(LL) ] = {149[ C(OP_READ) ] = {150[ C(RESULT_ACCESS) ] = 0x0030,151[ C(RESULT_MISS) ] = 0,152},153[ C(OP_WRITE) ] = {154[ C(RESULT_ACCESS) ] = 0x0038,155[ C(RESULT_MISS) ] = 0,156},157[ C(OP_PREFETCH) ] = {158[ C(RESULT_ACCESS) ] = 0,159[ C(RESULT_MISS) ] = 0,160},161},162163[ C(DTLB) ] = {164[ C(OP_READ) ] = {165[ C(RESULT_ACCESS) ] = 0x0222,166[ C(RESULT_MISS) ] = 0x0220,167},168[ C(OP_WRITE) ] = {169[ C(RESULT_ACCESS) ] = 0,170[ C(RESULT_MISS) ] = 0,171},172[ 
C(OP_PREFETCH) ] = {173[ C(RESULT_ACCESS) ] = 0,174[ C(RESULT_MISS) ] = 0,175},176},177178[ C(ITLB) ] = {179[ C(OP_READ) ] = {180[ C(RESULT_ACCESS) ] = 0,181[ C(RESULT_MISS) ] = 0x02a0,182},183[ C(OP_WRITE) ] = {184[ C(RESULT_ACCESS) ] = -1,185[ C(RESULT_MISS) ] = -1,186},187[ C(OP_PREFETCH) ] = {188[ C(RESULT_ACCESS) ] = -1,189[ C(RESULT_MISS) ] = -1,190},191},192193[ C(BPU) ] = {194[ C(OP_READ) ] = {195[ C(RESULT_ACCESS) ] = -1,196[ C(RESULT_MISS) ] = -1,197},198[ C(OP_WRITE) ] = {199[ C(RESULT_ACCESS) ] = -1,200[ C(RESULT_MISS) ] = -1,201},202[ C(OP_PREFETCH) ] = {203[ C(RESULT_ACCESS) ] = -1,204[ C(RESULT_MISS) ] = -1,205},206},207};208209static int sh4a_event_map(int event)210{211return sh4a_general_events[event];212}213214static u64 sh4a_pmu_read(int idx)215{216return __raw_readl(PPC_PMCTR(idx));217}218219static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)220{221unsigned int tmp;222223tmp = __raw_readl(PPC_CCBR(idx));224tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);225__raw_writel(tmp, PPC_CCBR(idx));226}227228static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)229{230unsigned int tmp;231232tmp = __raw_readl(PPC_PMCAT);233tmp &= ~PMCAT_EMU_CLR_MASK;234tmp |= idx ? 
PMCAT_CLR1 : PMCAT_CLR0;235__raw_writel(tmp, PPC_PMCAT);236237tmp = __raw_readl(PPC_CCBR(idx));238tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;239__raw_writel(tmp, PPC_CCBR(idx));240241__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));242}243244static void sh4a_pmu_disable_all(void)245{246int i;247248for (i = 0; i < sh4a_pmu.num_events; i++)249__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));250}251252static void sh4a_pmu_enable_all(void)253{254int i;255256for (i = 0; i < sh4a_pmu.num_events; i++)257__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));258}259260static struct sh_pmu sh4a_pmu = {261.name = "sh4a",262.num_events = 2,263.event_map = sh4a_event_map,264.max_events = ARRAY_SIZE(sh4a_general_events),265.raw_event_mask = 0x3ff,266.cache_events = &sh4a_cache_events,267.read = sh4a_pmu_read,268.disable = sh4a_pmu_disable,269.enable = sh4a_pmu_enable,270.disable_all = sh4a_pmu_disable_all,271.enable_all = sh4a_pmu_enable_all,272};273274static int __init sh4a_pmu_init(void)275{276/*277* Make sure this CPU actually has perf counters.278*/279if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {280pr_notice("HW perf events unsupported, software events only.\n");281return -ENODEV;282}283284return register_sh_pmu(&sh4a_pmu);285}286early_initcall(sh4a_pmu_init);287288289