GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/vcpu_sbi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

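/*
 * Placeholder definitions: when SBI v0.1 or PMU support is compiled out,
 * these stubs keep the sbi_ext[] table below well-formed. The impossible
 * extension ID range (-1UL) and NULL handler mean they are never treated
 * as a usable extension.
 */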
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

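/*
 * Registry of all SBI extension implementations known to KVM. Each entry
 * pairs a user-visible extension index (KVM_RISCV_SBI_EXT_*) with its
 * in-kernel implementation. The base extension is indexed with
 * KVM_RISCV_SBI_EXT_MAX so it bypasses per-VCPU enable/disable tracking.
 */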
struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SUSP,
                .ext_ptr = &vcpu_sbi_ext_susp,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_STA,
                .ext_ptr = &vcpu_sbi_ext_sta,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_FWFT,
                .ext_ptr = &vcpu_sbi_ext_fwft,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MPXY,
                .ext_ptr = &vcpu_sbi_ext_mpxy,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};

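/* Look up the registry entry for a user-visible SBI extension index. */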
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;

        if (idx >= KVM_RISCV_SBI_EXT_MAX)
                return NULL;

        for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == idx) {
                        sext = &sbi_ext[i];
                        break;
                }
        }

        return sext;
}

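/*
 * An extension counts as supported if probing left it in any state other
 * than UNAVAILABLE; it may still be either enabled or disabled.
 */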
static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

        return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

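/*
 * Forward an SBI call to userspace: marshal the ecall arguments into
 * kvm_run and exit with KVM_EXIT_RISCV_SBI. ret[0] is pre-set to
 * SBI_ERR_NOT_SUPPORTED, presumably so the guest still sees a sane error
 * if userspace never fills in a result.
 */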
int kvm_riscv_vcpu_sbi_forward_handler(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run,
                                       struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
        run->riscv_sbi.ret[1] = 0;
        retdata->uexit = true;
        return 0;
}

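/*
 * Stop all VCPUs of this VM and report a system-level event (e.g. shutdown
 * or reset) to userspace via KVM_EXIT_SYSTEM_EVENT.
 */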
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                spin_lock(&tmp->arch.mp_state_lock);
                WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
                spin_unlock(&tmp->arch.mp_state_lock);
        }
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

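/*
 * Record the post-reset entry point (pc) and argument (a1), then request
 * a reset of this VCPU via KVM_REQ_VCPU_RESET.
 */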
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
                                      unsigned long pc, unsigned long a1)
{
        spin_lock(&vcpu->arch.reset_state.lock);
        vcpu->arch.reset_state.pc = pc;
        vcpu->arch.reset_state.a1 = a1;
        spin_unlock(&vcpu->arch.reset_state.lock);

        kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
}

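/*
 * Apply the recorded reset state to the guest context: a0 is set to the
 * VCPU ID, supervisor interrupts are masked, and VS-stage translation
 * (vsatp) is cleared.
 */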
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;

        cntx->a0 = vcpu->vcpu_id;

        spin_lock(&vcpu->arch.reset_state.lock);
        cntx->sepc = reset_state->pc;
        cntx->a1 = reset_state->a1;
        spin_unlock(&vcpu->arch.reset_state.lock);

        cntx->sstatus &= ~SR_SIE;
        csr->vsatp = 0;
}

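/*
 * Complete an SBI call that was forwarded to userspace: copy the returned
 * values from kvm_run back into a0/a1 and step sepc past the ecall
 * instruction. Guarded so the return is only applied once.
 */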
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}

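/*
 * Toggle one SBI extension on behalf of userspace. Only the values 0 and 1
 * are accepted, and extensions that failed probing cannot be toggled.
 */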
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        scontext->ext_status[sext->ext_idx] = (reg_val) ?
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED :
                        KVM_RISCV_SBI_EXT_STATUS_DISABLED;

        return 0;
}

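/* Report whether one SBI extension is currently enabled (1) or not (0). */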
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        *reg_val = scontext->ext_status[sext->ext_idx] ==
                   KVM_RISCV_SBI_EXT_STATUS_ENABLED;

        return 0;
}

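/*
 * The MULTI registers pack one enable bit per extension, BITS_PER_LONG
 * extensions per register: bit i of register reg_num controls extension
 * index (reg_num * BITS_PER_LONG + i).
 */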
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

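/* Read back the enabled-extension bitmap for one MULTI register. */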
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}

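/*
 * Enumerate the KVM_REG_RISCV_SBI_EXT register IDs for KVM_GET_REG_LIST;
 * with a NULL uindices this only counts them.
 */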
int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int n = 0;

        for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ?
                           KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
                          KVM_REG_RISCV_SBI_SINGLE | i;

                if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
                        continue;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}

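/*
 * KVM_SET_ONE_REG handler for the SBI extension control registers. This
 * only works before the VCPU first runs (-EBUSY afterwards).
 *
 * Illustrative userspace sketch (hypothetical file descriptor name, not
 * part of this file), disabling the PMU extension on a 64-bit host:
 *
 *      __u64 val = 0;
 *      struct kvm_one_reg r = {
 *              .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *                    KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *                    KVM_RISCV_SBI_EXT_PMU,
 *              .addr = (unsigned long)&val,
 *      };
 *      ioctl(vcpu_fd, KVM_SET_ONE_REG, &r);
 */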
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}

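/* KVM_GET_ONE_REG counterpart for the SBI extension control registers. */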
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

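/*
 * Enumerate the KVM_REG_RISCV_SBI_STATE register IDs exported by enabled
 * extensions, using the extension's get_state_reg_id callback when it has
 * one and the default subtype | index encoding otherwise.
 */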
int kvm_riscv_vcpu_reg_indices_sbi(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        unsigned long state_reg_count;
        int i, j, rc, count = 0;
        u64 reg;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (!ext->get_state_reg_count ||
                    scontext->ext_status[entry->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                        continue;

                state_reg_count = ext->get_state_reg_count(vcpu);
                if (!uindices)
                        goto skip_put_user;

                for (j = 0; j < state_reg_count; j++) {
                        if (ext->get_state_reg_id) {
                                rc = ext->get_state_reg_id(vcpu, j, &reg);
                                if (rc)
                                        return rc;
                        } else {
                                reg = KVM_REG_RISCV |
                                      (IS_ENABLED(CONFIG_32BIT) ?
                                       KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
                                      KVM_REG_RISCV_SBI_STATE |
                                      ext->state_reg_subtype | j;
                        }

                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

skip_put_user:
                count += state_reg_count;
        }

        return count;
}

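/*
 * Find the enabled extension, if any, that owns the given SBI state
 * register subtype.
 */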
static const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext_withstate(struct kvm_vcpu *vcpu,
                                                                            unsigned long subtype)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->get_state_reg_count &&
                    ext->state_reg_subtype == subtype &&
                    scontext->ext_status[entry->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                        return ext;
        }

        return NULL;
}

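/*
 * KVM_SET_ONE_REG handler for SBI state registers: the access size comes
 * from reg->id and the write is delegated to the owning extension's
 * set_state_reg callback.
 */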
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        const struct kvm_vcpu_sbi_extension *ext;
        unsigned long reg_subtype;
        void *reg_val;
        u64 data64;
        u32 data32;
        u16 data16;
        u8 data8;

        switch (KVM_REG_SIZE(reg->id)) {
        case 1:
                reg_val = &data8;
                break;
        case 2:
                reg_val = &data16;
                break;
        case 4:
                reg_val = &data32;
                break;
        case 8:
                reg_val = &data64;
                break;
        default:
                return -EINVAL;
        }

        if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
        if (!ext || !ext->set_state_reg)
                return -EINVAL;

        return ext->set_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
}

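/*
 * KVM_GET_ONE_REG handler for SBI state registers, delegating to the
 * owning extension's get_state_reg callback.
 */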
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        const struct kvm_vcpu_sbi_extension *ext;
        unsigned long reg_subtype;
        void *reg_val;
        u64 data64;
        u32 data32;
        u16 data16;
        u8 data8;
        int ret;

        switch (KVM_REG_SIZE(reg->id)) {
        case 1:
                reg_val = &data8;
                break;
        case 2:
                reg_val = &data16;
                break;
        case 4:
                reg_val = &data32;
                break;
        case 8:
                reg_val = &data64;
                break;
        default:
                return -EINVAL;
        }

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
        if (!ext || !ext->get_state_reg)
                return -EINVAL;

        ret = ext->get_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
        if (ret)
                return ret;

        if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

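/*
 * Map a raw SBI extension ID (as passed in a7) to its implementation.
 * Entries indexed at KVM_RISCV_SBI_EXT_MAX or above (i.e. the base
 * extension) are always usable; all others must be ENABLED for this VCPU.
 */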
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;

                        return NULL;
                }
        }

        return NULL;
}

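/*
 * Top-level SBI ecall dispatch, invoked on a guest ecall trap. Returns
 * > 0 to continue the run loop, 0 to exit to userspace, < 0 on error.
 */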
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * When the SBI extension returns a Linux error code, it exits the
         * ioctl loop and forwards the error to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit, or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }
ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}

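/*
 * Probe and initialize every registered extension for this VCPU; anything
 * whose probe or init callback fails is marked UNAVAILABLE.
 */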
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int idx, i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;
                idx = entry->ext_idx;

                if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
                        continue;

                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }

                scontext->ext_status[idx] = ext->default_disabled ?
                                        KVM_RISCV_SBI_EXT_STATUS_DISABLED :
                                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;

                if (ext->init && ext->init(vcpu) != 0)
                        scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
        }
}

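/*
 * Tear down per-VCPU extension state; only extensions that were available
 * and provide a deinit callback are visited.
 */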
void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int idx, i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;
                idx = entry->ext_idx;

                if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
                        continue;

                if (scontext->ext_status[idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE ||
                    !ext->deinit)
                        continue;

                ext->deinit(vcpu);
        }
}

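/*
 * Reset per-VCPU extension state; only enabled extensions that provide a
 * reset callback are visited.
 */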
void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int idx, i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;
                idx = entry->ext_idx;

                if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
                        continue;

                if (scontext->ext_status[idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED ||
                    !ext->reset)
                        continue;

                ext->reset(vcpu);
        }
}