GitHub Repository: torvalds/linux
Path: blob/master/include/kvm/arm_pmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <[email protected]>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u64 events_host;
	u64 events_guest;
};
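
/*
 * Bitmasks of events to keep counting while the host (events_host) or the
 * guest (events_guest) runs; the world-switch code applies the difference
 * on each transition.
 */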

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};
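
/*
 * Since each pmc records its own index, a struct kvm_pmc * alone is enough
 * to find the owning vcpu. A minimal sketch of that recovery (hypothetical
 * helper name, assuming the usual container_of pattern):
 *
 *	static inline struct kvm_vcpu *pmc_to_vcpu(struct kvm_pmc *pmc)
 *	{
 *		return container_of(pmc, struct kvm_vcpu,
 *				    arch.pmu.pmc[pmc->idx]);
 *	}
 */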
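
/*
 * List node wrapping a host arm_pmu instance, so KVM can keep track of the
 * PMUs present on the system.
 */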
struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

bool kvm_supports_guest_pmuv3(void);
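
/*
 * IRQ numbers below VGIC_NR_SGIS are SGIs, which cannot serve as the PMU
 * overflow interrupt; a value at or above that range means userspace has
 * assigned a PPI or SPI.
 */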
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
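
/*
 * The _attr hooks above back the KVM_{SET,GET,HAS}_DEVICE_ATTR vcpu ioctls.
 * A sketch of the userspace side (illustrative, not kernel code; the IRQ
 * number is chosen arbitrarily):
 *
 *	int irq = 23;	// a PPI
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */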

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)				\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && system_supports_pmuv3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
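
/*
 * An illustrative call site honouring the rule above (a sketch, not the
 * exact upstream sequence):
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	// ... enter the guest ...
 *	local_irq_enable();
 */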

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
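/*
 * PMU support compiled out: a bare struct and static inline stubs keep the
 * call sites building without per-call #ifdefs.
 */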
struct kvm_pmu {
};

static inline bool kvm_supports_guest_pmuv3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
						  u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}

#endif

#endif