GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/mmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

#include <linux/errno.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>

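/*
 * Write-protect the whole G-stage range backing a memory slot so that
 * subsequent guest writes fault and can be recorded in the dirty bitmap,
 * then flush remote TLBs for that slot.
 */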
static void mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_wp_range(&gstage, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs_memslot(kvm, memslot);
}

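/*
 * Map a host physical I/O range into the guest physical address space one
 * page at a time, using non-cached page protections. Page-table pages come
 * from a local memory cache (GFP_ATOMIC when the caller is in atomic
 * context).
 */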
int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable, bool in_atomic)
{
	int ret = 0;
	pgprot_t prot;
	unsigned long pfn;
	phys_addr_t addr, end;
	struct kvm_mmu_memory_cache pcache = {
		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
		.gfp_zero = __GFP_ZERO,
	};
	struct kvm_gstage_mapping map;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(hpa);
	prot = pgprot_noncached(PAGE_WRITE);

	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
		map.addr = addr;
		map.pte = pfn_pte(pfn, prot);
		map.pte = pte_mkdirty(map.pte);
		map.level = 0;

		if (!writable)
			map.pte = pte_wrprotect(map.pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, kvm_riscv_gstage_pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_riscv_gstage_set_pte(&gstage, &pcache, &map);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&pcache);
	return ret;
}

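/* Unmap a guest physical range previously mapped with kvm_riscv_mmu_ioremap(). */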
void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

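/*
 * Write-protect the pages selected by @mask within the memory slot
 * (starting at @gfn_offset) so that dirty logging can re-detect writes
 * to them.
 */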
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	kvm_riscv_gstage_wp_range(&gstage, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_mmu_free_pgd(kvm);
}

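/* Unmap the entire G-stage range covered by @slot. */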
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point the memslot has been committed and its
	 * dirty_bitmap[] has been allocated; dirty pages will be
	 * tracked while the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
		mmu_wp_memory_region(kvm, new->id);
}

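/*
 * Validate a new or changed memory region before it is committed: reject
 * regions that extend beyond the guest physical address space, and eagerly
 * map any VM_PFNMAP (I/O) VMAs backing the region via kvm_riscv_mmu_ioremap().
 */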
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	hva_t hva, reg_end, size;
	gpa_t base_gpa;
	bool writable;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * GPA space addressable by the KVM guest.
	 */
	if ((new->base_gfn + new->npages) >=
	    (kvm_riscv_gstage_gpa_size >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	size = new->npages << PAGE_SHIFT;
	reg_end = hva + size;
	base_gpa = new->base_gfn << PAGE_SHIFT;
	writable = !(new->flags & KVM_MEM_READONLY);

	mmap_read_lock(current->mm);

	/*
	 * A memory region could potentially cover multiple VMAs, and
	 * any holes between them, so iterate over all of them to find
	 * out if we can map any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_start, vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/* Take the intersection of this VMA with the memory region */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = base_gpa + (vm_start - hva);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = kvm_riscv_mmu_ioremap(kvm, gpa, pa, vm_end - vm_start,
						    writable, false);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	if (ret)
		kvm_riscv_mmu_iounmap(kvm, base_gpa, size);

out:
	mmap_read_unlock(current->mm);
	return ret;
}

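/*
 * MMU notifier callback: unmap the G-stage mappings for the given gfn range.
 * Returns false to indicate the generic code does not need to do a further
 * TLB flush on our behalf.
 */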
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
	kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
				     (range->end - range->start) << PAGE_SHIFT,
				     range->may_block);
	return false;
}

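/*
 * MMU notifier "age" callback: test and clear the accessed bit of the leaf
 * PTE that maps the start of @range, if such a mapping exists.
 */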
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
	if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
				       &ptep, &ptep_level))
		return false;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

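/*
 * MMU notifier "test age" callback: report whether the leaf PTE mapping the
 * start of @range is young, without clearing the accessed bit.
 */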
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
	if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
				       &ptep, &ptep_level))
		return false;

	return pte_young(ptep_get(ptep));
}

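/*
 * Handle a G-stage translation fault: look up the faulting hva's VMA to pick
 * a mapping size, fault in the host page, and install the mapping under
 * kvm->mmu_lock while honouring MMU-notifier invalidations (mmu_seq) and
 * dirty logging (map read-only on non-write faults while logging is active).
 */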
int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		      gpa_t gpa, unsigned long hva, bool is_write,
		      struct kvm_gstage_mapping *out_map)
{
	int ret;
	kvm_pfn_t hfn;
	bool writable;
	short vma_pageshift;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
	unsigned long vma_pagesize, mmu_seq;
	struct kvm_gstage gstage;
	struct page *page;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	/* Setup initial state of output mapping */
	memset(out_map, 0, sizeof(*out_map));

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, kvm_riscv_gstage_pgd_levels);
	if (ret) {
		kvm_err("Failed to topup G-stage cache\n");
		return ret;
	}

	mmap_read_lock(current->mm);

	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_pageshift = huge_page_shift(hstate_vma(vma));
	else
		vma_pageshift = PAGE_SHIFT;
	vma_pagesize = 1ULL << vma_pageshift;
	if (logging || (vma->vm_flags & VM_PFNMAP))
		vma_pagesize = PAGE_SIZE;

	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

	/*
	 * Read mmu_invalidate_seq so that KVM can detect if the results of
	 * vma_lookup() or __kvm_faultin_pfn() become stale prior to acquiring
	 * kvm->mmu_lock.
	 *
	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
	 * with the smp_wmb() in kvm_mmu_invalidate_end().
	 */
	mmu_seq = kvm->mmu_invalidate_seq;
	mmap_read_unlock(current->mm);

	if (vma_pagesize != PUD_SIZE &&
	    vma_pagesize != PMD_SIZE &&
	    vma_pagesize != PAGE_SIZE) {
		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
		return -EFAULT;
	}

	hfn = __kvm_faultin_pfn(memslot, gfn, is_write ? FOLL_WRITE : 0,
				&writable, &page);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
				vma_pageshift, current);
		return 0;
	}
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	/*
	 * If logging is active then we allow writable pages only
	 * for write faults.
	 */
	if (logging && !is_write)
		writable = false;

	spin_lock(&kvm->mmu_lock);

	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	if (writable) {
		mark_page_dirty_in_slot(kvm, memslot, gfn);
		ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
						vma_pagesize, false, true, out_map);
	} else {
		ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
						vma_pagesize, true, true, out_map);
	}

	if (ret)
		kvm_err("Failed to map in G-stage\n");

out_unlock:
	kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

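/* Allocate the zeroed G-stage PGD pages for a new VM. */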
int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(kvm_riscv_gstage_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
	kvm->arch.pgd_phys = page_to_phys(pgd_page);

	return 0;
}

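/*
 * Unmap the whole guest physical address space and free the G-stage PGD
 * pages; the PGD pointer is cleared under kvm->mmu_lock before freeing.
 */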
void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
{
	struct kvm_gstage gstage;
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		gstage.kvm = kvm;
		gstage.flags = 0;
		gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
		gstage.pgd = kvm->arch.pgd;
		kvm_riscv_gstage_unmap_range(&gstage, 0UL, kvm_riscv_gstage_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(kvm_riscv_gstage_pgd_size));
}

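/*
 * Program the hgatp CSR for this vCPU with the G-stage mode, current VMID,
 * and PGD physical page number; when no VMID bits are implemented, locally
 * flush all guest-physical (GVMA) mappings instead of relying on VMID
 * isolation.
 */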
void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu)
{
	unsigned long hgatp = kvm_riscv_gstage_mode << HGATP_MODE_SHIFT;
	struct kvm_arch *k = &vcpu->kvm->arch;

	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
	hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

	ncsr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_gstage_vmid_bits())
		kvm_riscv_local_hfence_gvma_all();
}