GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/kvm/book3s_pr_papr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <[email protected]>
 *    Paul Mackerras <[email protected]>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 */

#include <linux/anon_inodes.h>

#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16		/* bytes per HPT entry */

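/*
 * Turn a guest HPTE index into the userspace address of the PTE group
 * that holds it: the index is scaled by HPTE_SIZE and masked according
 * to the hash table size (HTABSIZE) encoded in the low bits of the
 * guest's SDR1, then combined with the table origin (HTABORG) from SDR1.
 */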
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	unsigned long pteg_addr;

	pte_index <<= 4;
	pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg_addr |= pte_index;

	return pteg_addr;
}

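/*
 * H_ENTER: install an HPTE in the guest's hash page table.  The whole
 * PTE group is read from the userspace HPT; a free slot is picked (or,
 * with H_EXACT, the requested slot is checked), the new entry from
 * GPR6/GPR7 is written back, and the slot index is returned in GPR4.
 */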
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	__be64 pteg[2 * 8];
	__be64 *hpte;
	unsigned long pteg_addr, i;
	long int ret;

	i = pte_index & 7;
	pte_index &= ~7UL;
	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
		goto done;
	hpte = pteg;

	ret = H_PTEG_FULL;
	if (likely((flags & H_EXACT) == 0)) {
		for (i = 0; ; ++i) {
			if (i == 8)
				goto done;
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		hpte += i * 2;
		if (*hpte & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
	pteg_addr += i * HPTE_SIZE;
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
		goto done;
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

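/*
 * H_REMOVE: invalidate one HPTE.  The entry must be valid and, if
 * requested, match the AVPN or AND-condition in GPR6; it is then
 * zeroed in the userspace HPT, the stale translation is flushed
 * through the vcpu's tlbie callback, and the old entry is returned
 * in GPR4/GPR5.
 */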
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;
	pte[0] = be64_to_cpu((__force __be64)pte[0]);
	pte[1] = be64_to_cpu((__force __be64)pte[1]);

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
		goto done;

	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

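/*
 * H_BULK_REMOVE translation specifiers: the high word of each pair
 * (tsh) carries the request type, a response/return code and the
 * flags, with the PTE index in the low bits; the low word (tsl)
 * carries the AVPN or AND-mask used for matching.
 */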
/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE	0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST	0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE	0x8000000000000000ULL
#define H_BULK_REMOVE_END	0xc000000000000000ULL
#define H_BULK_REMOVE_CODE	0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS	0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND	0x1000000000000000ULL
#define H_BULK_REMOVE_PARM	0x2000000000000000ULL
#define H_BULK_REMOVE_HW	0x3000000000000000ULL
#define H_BULK_REMOVE_RC	0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS	0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE	0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND	0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN	0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX	0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH	4

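/*
 * H_BULK_REMOVE: process up to H_BULK_REMOVE_MAX_BATCH specifier pairs
 * from GPR4 onwards, removing each matching HPTE as H_REMOVE would and
 * writing a per-entry response code back into the high specifier
 * register.
 */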
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
			ret = H_FUNCTION;
			break;
		}
		pte[0] = be64_to_cpu((__force __be64)pte[0]);
		pte[1] = be64_to_cpu((__force __be64)pte[1]);

		/* tsl = AVPN */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
				ret = H_FUNCTION;
				break;
			}

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

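/*
 * H_PROTECT: update the protection-related bits (PP, N, key) of an
 * existing HPTE from the flags in GPR4, flush the stale translation
 * with tlbie, and write the modified entry back to the userspace HPT.
 */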
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	ret = H_FUNCTION;
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;
	pte[0] = be64_to_cpu((__force __be64)pte[0]);
	pte[1] = be64_to_cpu((__force __be64)pte[1]);

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];
	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
	       HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	pte[1] = r;

	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	pte[0] = (__force u64)cpu_to_be64(pte[0]);
	pte[1] = (__force u64)cpu_to_be64(pte[1]);
	ret = H_FUNCTION;
	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
		goto done;
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

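/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE: cache-inhibited accesses are
 * forwarded to the common handlers; H_TOO_HARD is turned into
 * EMULATE_FAIL so the hypercall can be completed elsewhere.
 */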
static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
	long rc;

	rc = kvmppc_h_logical_ci_load(vcpu);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
	long rc;

	rc = kvmppc_h_logical_ci_store(vcpu);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

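/*
 * H_SET_MODE: only the address translation mode resource is handled
 * here, and only AIL=0 is accepted, since PR KVM does not provide
 * alternate interrupt locations to its guests.
 */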
static int kvmppc_h_pr_set_mode(struct kvm_vcpu *vcpu)
{
	unsigned long mflags = kvmppc_get_gpr(vcpu, 4);
	unsigned long resource = kvmppc_get_gpr(vcpu, 5);

	if (resource == H_SET_MODE_RESOURCE_ADDR_TRANS_MODE) {
		/* KVM PR does not provide AIL!=0 to guests */
		if (mflags == 0)
			kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
		else
			kvmppc_set_gpr(vcpu, 3, H_UNSUPPORTED_FLAG_START - 63);
		return EMULATE_DONE;
	}
	return EMULATE_FAIL;
}

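/*
 * TCE (IOMMU mapping) hypercalls are backed by the common SPAPR TCE
 * code when CONFIG_SPAPR_TCE_IOMMU is enabled; otherwise the stubs
 * below simply fail emulation.
 */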
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	long rc;

	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
	long rc;

	rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
				       tce, npages);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
	long rc;

	rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

#else /* CONFIG_SPAPR_TCE_IOMMU */
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}
#endif /* CONFIG_SPAPR_TCE_IOMMU */

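/*
 * Forward an XICS interrupt-controller hypercall to the in-kernel
 * XICS emulation and return its result in GPR3.
 */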
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
	long rc = kvmppc_xics_hcall(vcpu, cmd);
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}

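/*
 * Top-level PAPR hypercall dispatch for PR KVM.  Hypercalls that are
 * not enabled for this VM, or not handled below, return EMULATE_FAIL
 * so they can be dealt with outside this file.
 */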
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	int rc, idx;

	if (cmd <= MAX_HCALL_OPCODE &&
	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
		return EMULATE_FAIL;

	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		return kvmppc_h_pr_bulk_remove(vcpu);
	case H_PUT_TCE:
		return kvmppc_h_pr_put_tce(vcpu);
	case H_PUT_TCE_INDIRECT:
		return kvmppc_h_pr_put_tce_indirect(vcpu);
	case H_STUFF_TCE:
		return kvmppc_h_pr_stuff_tce(vcpu);
	case H_CEDE:
		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
		kvm_vcpu_halt(vcpu);
		vcpu->stat.generic.halt_wakeup++;
		return EMULATE_DONE;
	case H_LOGICAL_CI_LOAD:
		return kvmppc_h_pr_logical_ci_load(vcpu);
	case H_LOGICAL_CI_STORE:
		return kvmppc_h_pr_logical_ci_store(vcpu);
	case H_SET_MODE:
		return kvmppc_h_pr_set_mode(vcpu);
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu))
			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			break;
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (rc)
			break;
		kvmppc_set_gpr(vcpu, 3, 0);
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}

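/* Report whether PR KVM implements a given hypercall in the kernel. */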
int kvmppc_hcall_impl_pr(unsigned long cmd)
{
	switch (cmd) {
	case H_ENTER:
	case H_REMOVE:
	case H_PROTECT:
	case H_BULK_REMOVE:
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case H_GET_TCE:
	case H_PUT_TCE:
	case H_PUT_TCE_INDIRECT:
	case H_STUFF_TCE:
#endif
	case H_CEDE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
	case H_SET_MODE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}
	return 0;
}

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added. Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
	H_ENTER,
	H_REMOVE,
	H_PROTECT,
	H_BULK_REMOVE,
#ifdef CONFIG_SPAPR_TCE_IOMMU
	H_GET_TCE,
	H_PUT_TCE,
#endif
	H_CEDE,
	H_SET_MODE,
#ifdef CONFIG_KVM_XICS
	H_XIRR,
	H_CPPR,
	H_EOI,
	H_IPI,
	H_IPOLL,
	H_XIRR_X,
#endif
	0
};

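/*
 * Enable the default hypercall set for a new VM: one bit per hcall
 * opcode (opcode / 4) in the enabled_hcalls bitmap.
 */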
void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
{
	int i;
	unsigned int hcall;

	for (i = 0; default_hcall_list[i]; ++i) {
		hcall = default_hcall_list[i];
		WARN_ON(!kvmppc_hcall_impl_pr(hcall));
		__set_bit(hcall / 4, kvm->arch.enabled_hcalls);
	}
}