GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kernel/paravirt.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <linux/sched/cputime.h>
#include <asm/paravirt.h>

static int has_steal_clock;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

static bool steal_acc = true;

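/* "no-steal-acc" on the kernel command line disables steal-time accounting */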
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

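/*
 * Read the steal time that the hypervisor publishes for @cpu. The version
 * field works like a seqcount: it is odd while an update is in progress,
 * so retry until an even version is seen unchanged across the read.
 */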
static u64 paravt_steal_clock(int cpu)
{
	int version;
	u64 steal;
	struct kvm_steal_time *src;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb(); /* Make sure that the version is read before the steal */
		steal = src->steal;
		virt_rmb(); /* Make sure that the steal is read before the next version */
	} while ((version & 1) || (version != src->version));

	return steal;
}

#ifdef CONFIG_SMP
static struct smp_ops native_ops;

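/*
 * Post @action in the target CPU's message word; only the first poster
 * issues the IPI hypercall, later actions are picked up when the target
 * drains the word in pv_ipi_interrupt(). ACTION_BOOT_CPU always goes
 * through the native IPI path.
 */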
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

	if (unlikely(action == ACTION_BOOT_CPU)) {
		native_ops.send_ipi_single(cpu, action);
		return;
	}

	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

	min = cpu_logical_map(cpu);
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

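/*
 * Send an IPI to every CPU in @mask with as few hypercalls as possible.
 * The hypercall carries a 128-bit bitmap (passed as two unsigned longs)
 * relative to a base CPU @min, so targets are grouped into windows of
 * KVM_IPI_CLUSTER_SIZE CPUs; when the next CPU no longer fits in the
 * current window, the accumulated bitmap is flushed and a new window is
 * started at that CPU.
 */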
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	if (unlikely(action == ACTION_BOOT_CPU)) {
		native_ops.send_ipi_mask(mask, action);
		return;
	}

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/* cpu < min, and bitmap still enough */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu > min, and bitmap still enough */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * With cpu, bitmap will exceed KVM_IPI_CLUSTER_SIZE,
			 * send IPI here directly and skip the remaining CPUs.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				       (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);
	}

	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			       (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

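/*
 * IPI handler: acknowledge the SWI0 interrupt, atomically fetch and clear
 * all pending action bits, then dispatch each requested action and bump
 * the matching per-CPU IPI statistics.
 */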
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		info->ipi_irqs[IPI_IRQ_WORK]++;
	}

	if (action & SMP_CLEAR_VECTOR) {
		complete_irq_moving();
		info->ipi_irqs[IPI_CLEAR_VECTOR]++;
	}

	return IRQ_HANDLED;
}

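/*
 * Set up the paravirt IPI path on SWI0. The native init is still done
 * first so that ACTION_BOOT_CPU can be delivered via native_ops.
 */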
static void pv_init_ipi(void)
{
	int r, swi;

	/* Init native ipi irq for ACTION_BOOT_CPU */
	native_ops.init_ipi();
	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
#endif

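/*
 * Detect KVM by reading the hypervisor signature from CPUCFG. The result
 * is cached in hypervisor_type so CPUCFG is only probed once.
 */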
bool kvm_para_available(void)
{
	int config;
	static int hypervisor_type;

	if (!cpu_has_hypervisor)
		return false;

	if (!hypervisor_type) {
		config = read_cpucfg(CPUCFG_KVM_SIG);
		if (!memcmp(&config, KVM_SIGNATURE, 4))
			hypervisor_type = HYPERVISOR_KVM;
	}

	return hypervisor_type == HYPERVISOR_KVM;
}

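/* Paravirt feature bits advertised via CPUCFG, cached after the first read */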
unsigned int kvm_arch_para_features(void)
{
	static unsigned int feature;

	if (!kvm_para_available())
		return 0;

	if (!feature)
		feature = read_cpucfg(CPUCFG_KVM_FEATURE);

	return feature;
}

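/*
 * If the hypervisor advertises KVM_FEATURE_IPI, save the native SMP ops
 * and reroute IPI setup and delivery through the hypercall-based versions.
 */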
int __init pv_ipi_init(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	native_ops = mp_ops;
	mp_ops.init_ipi = pv_init_ipi;
	mp_ops.send_ipi_single = pv_send_ipi_single;
	mp_ops.send_ipi_mask = pv_send_ipi_mask;
#endif

	return 0;
}

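/*
 * Register this CPU's kvm_steal_time area with the hypervisor by passing
 * its physical address, tagged with KVM_STEAL_PHYS_VALID, to the NOTIFY
 * hypercall. The area must not cross a page boundary.
 */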
static int pv_enable_steal_time(void)
{
	int cpu = smp_processor_id();
	unsigned long addr;
	struct kvm_steal_time *st;

	if (!has_steal_clock)
		return -EPERM;

	st = &per_cpu(steal_time, cpu);
	addr = per_cpu_ptr_to_phys(st);

	/* The whole structure kvm_steal_time should be in one page */
	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
		pr_warn("Illegal PV steal time addr %lx\n", addr);
		return -EFAULT;
	}

	addr |= KVM_STEAL_PHYS_VALID;
	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);

	return 0;
}

static void pv_disable_steal_time(void)
{
	if (has_steal_clock)
		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
}

#ifdef CONFIG_SMP
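/*
 * CPU hotplug callbacks: re-register the steal-time area when a CPU comes
 * online and deregister it before the CPU goes down, with interrupts
 * disabled around the hypercall.
 */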
static int pv_time_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_enable_steal_time();
	local_irq_restore(flags);

	return 0;
}

static int pv_time_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_disable_steal_time();
	local_irq_restore(flags);

	return 0;
}
#endif

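/*
 * On reboot, deregister steal time on every CPU so the hypervisor stops
 * updating the per-CPU steal-time areas.
 */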
static void pv_cpu_reboot(void *unused)
{
	pv_disable_steal_time();
}

static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
	on_each_cpu(pv_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block pv_reboot_nb = {
	.notifier_call = pv_reboot_notify,
};

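/*
 * Enable steal-time accounting when KVM_FEATURE_STEAL_TIME is available:
 * register this CPU's area, hook CPU hotplug and reboot, and switch the
 * pv_steal_clock static call over to paravt_steal_clock().
 */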
int __init pv_time_init(void)
{
	int r;

	if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		return 0;

	has_steal_clock = 1;
	r = pv_enable_steal_time();
	if (r < 0) {
		has_steal_clock = 0;
		return 0;
	}
	register_reboot_notifier(&pv_reboot_nb);

#ifdef CONFIG_SMP
	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				      "loongarch/pv_time:online",
				      pv_time_cpu_online, pv_time_cpu_down_prepare);
	if (r < 0) {
		has_steal_clock = 0;
		pr_err("Failed to install cpu hotplug callbacks\n");
		return r;
	}
#endif

	static_call_update(pv_steal_clock, paravt_steal_clock);

	static_key_slow_inc(&paravirt_steal_enabled);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (steal_acc)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif

	pr_info("Using paravirt steal-time\n");

	return 0;
}

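/*
 * With virt_spin_lock_key enabled, queued spinlocks can fall back to a
 * simpler test-and-set path, which tends to behave better when vCPUs
 * may be preempted by the host.
 */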
int __init pv_spinlock_init(void)
{
	if (!cpu_has_hypervisor)
		return 0;

	static_branch_enable(&virt_spin_lock_key);

	return 0;
}