// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/delay.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_timer.h>

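/* Current guest time: host cycle counter adjusted by the per-VM time delta. */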
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

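/*
 * Convert the delta between now and a target cycle count into
 * nanoseconds using the clocksource mult/shift pair. Returns 0 if
 * the target cycle count has already passed.
 */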
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

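/*
 * hrtimer callback used when Sstc is unavailable: re-arm if the
 * programmed deadline is still in the future, otherwise inject the
 * VS-level timer interrupt into the guest.
 */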
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

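/* Cancel a pending hrtimer-based timer event, if one is armed. */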
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

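/*
 * Program the next timer event directly into the vstimecmp CSR
 * (Sstc case). On 32-bit the 64-bit compare value is split across
 * the VSTIMECMP/VSTIMECMPH pair.
 */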
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	ncsr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	ncsr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	ncsr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}

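/*
 * Program the next timer event with a host hrtimer (no Sstc):
 * clear any pending VS-timer interrupt, then arm the hrtimer for
 * the nanosecond delta until the requested cycle count.
 */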
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

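/* Dispatch to the Sstc or hrtimer backend selected at init time. */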
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}

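/*
 * hrtimer callback used with Sstc while the VCPU is blocked: kick
 * the VCPU awake once the deadline passes; the actual interrupt is
 * delivered by hardware via vstimecmp.
 */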
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

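/*
 * A timer event is pending if the programmed deadline has already
 * passed or a VS-timer interrupt is waiting to be delivered.
 */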
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}

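/*
 * Arm the hrtimer before the VCPU blocks so it is woken when the
 * guest's programmed timer deadline arrives.
 */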
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

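/*
 * KVM_GET_ONE_REG handler for the timer register group: frequency,
 * current time, compare value, and armed state.
 */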
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

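/*
 * KVM_SET_ONE_REG handler for the timer register group. The timebase
 * frequency is fixed by the host and may not be changed; writing the
 * time register adjusts the per-VM time delta instead.
 */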
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		if (reg_val != riscv_timebase)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;
		break;
	}

	return ret;
}

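/*
 * Per-VCPU timer setup: pick the Sstc (vstimecmp) backend when the
 * extension is available, otherwise fall back to host hrtimers for
 * guest timer emulation.
 */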
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_hrtimer_expired, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

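/* Tear down the per-VCPU timer, cancelling any armed event. */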
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

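/* Reset the compare value to "never" and cancel any armed event. */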
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

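/*
 * Write the per-VM time delta into the htimedelta CSR so guest
 * reads of time are offset accordingly.
 */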
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	ncsr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	ncsr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	ncsr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

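/*
 * Restore timer state on VCPU load: update htimedelta and, with
 * Sstc, rewrite vstimecmp and disarm the blocking hrtimer.
 */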
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	ncsr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	ncsr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	ncsr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}

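/*
 * Read back vstimecmp on VM exit so guest updates to the compare
 * value (written directly with Sstc) are tracked by KVM.
 */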
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = ncsr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)ncsr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = ncsr_read(CSR_VSTIMECMP);
#endif
}

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
	 * upon every VM exit, so there is no need to save them here.
	 *
	 * If the VS-timer expires while no VCPU is running on a host
	 * CPU, a WFI executed by that host CPU will be an effective
	 * NOP, resulting in no power savings. This is because, as per
	 * the RISC-V Privileged specification: "WFI is also required
	 * to resume execution for locally enabled interrupts pending
	 * at any privilege level, regardless of the global interrupt
	 * enable at each privilege level."
	 *
	 * To address this issue, the vstimecmp CSR must be set to -1UL
	 * here whenever the VCPU is scheduled out or exits to user space.
	 */

	csr_write(CSR_VSTIMECMP, -1UL);
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMPH, -1UL);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}

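/*
 * Per-VM timer setup: cache the clocksource mult/shift pair and
 * zero the guest time base at VM creation.
 */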
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}