GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/vcpu_timer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/delay.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_timer.h>

static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
        return get_cycles64() + gt->time_delta;
}

static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
                                     struct kvm_guest_timer *gt,
                                     struct kvm_vcpu_timer *t)
{
        unsigned long flags;
        u64 cycles_now, cycles_delta, delta_ns;

        local_irq_save(flags);
        cycles_now = kvm_riscv_current_cycles(gt);
        if (cycles_now < cycles)
                cycles_delta = cycles - cycles_now;
        else
                cycles_delta = 0;
        delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
        local_irq_restore(flags);

        return delta_ns;
}
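
/*
 * Worked example for the conversion above (illustrative values, not from
 * the original source): nsec_mult/nsec_shift are obtained from
 * riscv_cs_get_mult_shift() in kvm_riscv_guest_timer_init() below, chosen
 * so that (cycles * nsec_mult) >> nsec_shift approximates
 * cycles * NSEC_PER_SEC / riscv_timebase. With a 10 MHz timebase, a
 * remaining delta of 10000 cycles therefore maps to roughly 1000000 ns
 * (1 ms) of hrtimer sleep for the callers below.
 */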

static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
        u64 delta_ns;
        struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
        struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

        if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
                delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
                hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
                return HRTIMER_RESTART;
        }

        t->next_set = false;
        kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

        return HRTIMER_NORESTART;
}
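
/*
 * The handler above re-arms itself if it fires before the requested guest
 * cycle has actually been reached (e.g. due to rounding in the
 * cycles-to-nanoseconds conversion); the VS-level timer interrupt is only
 * injected once the target time has passed.
 */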

static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
        if (!t->init_done || !t->next_set)
                return -EINVAL;

        hrtimer_cancel(&t->hrt);
        t->next_set = false;

        return 0;
}

static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
        ncsr_write(CSR_VSTIMECMP, ULONG_MAX);
        ncsr_write(CSR_VSTIMECMPH, ncycles >> 32);
        ncsr_write(CSR_VSTIMECMP, (u32)ncycles);
#else
        ncsr_write(CSR_VSTIMECMP, ncycles);
#endif
        return 0;
}
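
/*
 * On 32-bit hosts the 64-bit comparand above is written in three steps:
 * the low half is first set to ULONG_MAX so that the compare value never
 * transiently holds something smaller than the final value, which could
 * otherwise raise a spurious timer interrupt between writing the two
 * 32-bit halves.
 */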

static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 delta_ns;

        if (!t->init_done)
                return -EINVAL;

        kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

        delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
        t->next_cycles = ncycles;
        hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
        t->next_set = true;

        return 0;
}

int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        return t->timer_next_event(vcpu, ncycles);
}

static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
        u64 delta_ns;
        struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
        struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

        if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
                delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
                hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
                return HRTIMER_RESTART;
        }

        t->next_set = false;
        kvm_vcpu_kick(vcpu);

        return HRTIMER_NORESTART;
}
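
/*
 * Unlike kvm_riscv_vcpu_hrtimer_expired() above, this handler does not
 * inject an interrupt itself: it is used when the Sstc extension is
 * available, where the vstimecmp CSR raises the guest timer interrupt in
 * hardware once the VCPU runs again. In this file the Sstc-case hrtimer is
 * armed only while the VCPU is blocked (see kvm_riscv_vcpu_timer_blocking()
 * below), so kicking the VCPU is sufficient.
 */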

bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

        if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
            kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
                return true;
        else
                return false;
}

static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 delta_ns;

        if (!t->init_done)
                return;

        delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
        hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
        t->next_set = true;
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
        kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
                                 const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_TIMER);
        u64 reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
                return -ENOENT;

        switch (reg_num) {
        case KVM_REG_RISCV_TIMER_REG(frequency):
                reg_val = riscv_timebase;
                break;
        case KVM_REG_RISCV_TIMER_REG(time):
                reg_val = kvm_riscv_current_cycles(gt);
                break;
        case KVM_REG_RISCV_TIMER_REG(compare):
                reg_val = t->next_cycles;
                break;
        case KVM_REG_RISCV_TIMER_REG(state):
                reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
                                          KVM_RISCV_TIMER_STATE_OFF;
                break;
        default:
                return -ENOENT;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
                                 const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_TIMER);
        u64 reg_val;
        int ret = 0;

        if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
                return -ENOENT;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_TIMER_REG(frequency):
                if (reg_val != riscv_timebase)
                        return -EINVAL;
                break;
        case KVM_REG_RISCV_TIMER_REG(time):
                gt->time_delta = reg_val - get_cycles64();
                break;
        case KVM_REG_RISCV_TIMER_REG(compare):
                t->next_cycles = reg_val;
                break;
        case KVM_REG_RISCV_TIMER_REG(state):
                if (reg_val == KVM_RISCV_TIMER_STATE_ON)
                        ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
                else
                        ret = kvm_riscv_vcpu_timer_cancel(t);
                break;
        default:
                ret = -ENOENT;
                break;
        }

        return ret;
}
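
/*
 * Note that writing the "time" register above adjusts gt->time_delta,
 * which lives in kvm->arch and is therefore shared by every VCPU of the
 * VM: restoring guest time through one VCPU shifts the guest timebase for
 * the whole VM.
 */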

int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        if (t->init_done)
                return -EINVAL;

        t->init_done = true;
        t->next_set = false;

        /* Enable sstc for every vcpu if available in hardware */
        if (riscv_isa_extension_available(NULL, SSTC)) {
                t->sstc_enabled = true;
                hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired, CLOCK_MONOTONIC,
                              HRTIMER_MODE_REL);
                t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
        } else {
                t->sstc_enabled = false;
                hrtimer_setup(&t->hrt, kvm_riscv_vcpu_hrtimer_expired, CLOCK_MONOTONIC,
                              HRTIMER_MODE_REL);
                t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
        }

        return 0;
}
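
/*
 * The initialization above selects one of two modes: with the Sstc
 * extension the guest's timer compare value is programmed directly in
 * hardware (vstimecmp) and the host hrtimer is only needed to wake a
 * blocked VCPU, whereas without Sstc every guest timer event is emulated
 * with a host hrtimer and a software-injected VS timer interrupt via
 * kvm_riscv_vcpu_update_hrtimer().
 */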

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
        int ret;

        ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
        vcpu->arch.timer.init_done = false;

        return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        t->next_cycles = -1ULL;
        return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
        ncsr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
        ncsr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
        ncsr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        kvm_riscv_vcpu_update_timedelta(vcpu);

        if (!t->sstc_enabled)
                return;

#if defined(CONFIG_32BIT)
        ncsr_write(CSR_VSTIMECMP, ULONG_MAX);
        ncsr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
        ncsr_write(CSR_VSTIMECMP, (u32)(t->next_cycles));
#else
        ncsr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

        /* timer should be enabled for the remaining operations */
        if (unlikely(!t->init_done))
                return;

        kvm_riscv_vcpu_timer_unblocking(vcpu);
}

void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        if (!t->sstc_enabled)
                return;

#if defined(CONFIG_32BIT)
        t->next_cycles = ncsr_read(CSR_VSTIMECMP);
        t->next_cycles |= (u64)ncsr_read(CSR_VSTIMECMPH) << 32;
#else
        t->next_cycles = ncsr_read(CSR_VSTIMECMP);
#endif
}

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        if (!t->sstc_enabled)
                return;

        /*
         * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
         * upon every VM exit, so there is no need to save them here.
         *
         * If the VS-timer expires while no VCPU is running on a host CPU,
         * then a WFI executed by that host CPU is an effective NOP and
         * yields no power savings. This is because, as per the RISC-V
         * Privileged specification: "WFI is also required to resume
         * execution for locally enabled interrupts pending at any
         * privilege level, regardless of the global interrupt enable at
         * each privilege level."
         *
         * To address the above issue, the vstimecmp CSR must be set to
         * -1UL here whenever the VCPU is scheduled out or exits to user
         * space.
         */

        csr_write(CSR_VSTIMECMP, -1UL);
#if defined(CONFIG_32BIT)
        csr_write(CSR_VSTIMECMPH, -1UL);
#endif

        /* timer should be enabled for the remaining operations */
        if (unlikely(!t->init_done))
                return;

        if (kvm_vcpu_is_blocking(vcpu))
                kvm_riscv_vcpu_timer_blocking(vcpu);
}

void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
        struct kvm_guest_timer *gt = &kvm->arch.timer;

        riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
        gt->time_delta = -get_cycles64();
}
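
/*
 * Setting time_delta to -get_cycles64() above makes guest time start at
 * (roughly) zero when the VM is created: kvm_riscv_current_cycles()
 * returns host cycles plus this delta, and user space may later adjust it
 * through the KVM_REG_RISCV_TIMER_REG(time) register.
 */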