GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/nvhe/mem_protect.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

struct host_mmu host_mmu;

static struct hyp_pool host_s2_pool;

static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
#define current_vm (*this_cpu_ptr(&__current_vm))

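/*
 * Helpers to take the per-component locks. Taking a guest's lock also makes
 * the VM visible to the guest_s2_* allocator callbacks below through the
 * per-CPU 'current_vm' pointer, so it must be held around any guest stage-2
 * page-table operation.
 */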
static void guest_lock_component(struct pkvm_hyp_vm *vm)
{
	hyp_spin_lock(&vm->lock);
	current_vm = vm;
}

static void guest_unlock_component(struct pkvm_hyp_vm *vm)
{
	current_vm = NULL;
	hyp_spin_unlock(&vm->lock);
}

static void host_lock_component(void)
{
	hyp_spin_lock(&host_mmu.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_mmu.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

#define for_each_hyp_page(__p, __st, __sz)				\
	for (struct hyp_page *__p = hyp_phys_to_page(__st),		\
			     *__e = __p + ((__sz) >> PAGE_SHIFT);	\
	     __p < __e; __p++)

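/*
 * Allocator callbacks backing the host stage-2 page-table. They all draw
 * from host_s2_pool, which is filled once by prepare_s2_pool() with the
 * pages handed over at kvm_host_prepare_stage2() time.
 */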
static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static void host_s2_free_unlinked_table(void *addr, s8 level)
{
	kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.free_unlinked_table = host_s2_free_unlinked_table,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					      id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

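/*
 * One-time setup of the host's identity-mapped stage-2 page-table: derive
 * the VTCR from the ID registers, initialise the backing page allocator
 * from @pgt_pool_base, and instantiate the page-table itself.
 */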
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_mmu.lock);
	mmu->arch = &host_mmu.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
	mmu->pgt = &host_mmu.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}

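/*
 * Allocator callbacks backing non-protected guest stage-2 page-tables. They
 * use the per-VM pool set up by kvm_guest_prepare_stage2(), falling back to
 * the provided memcache when the pool runs dry.
 */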
static void *guest_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));

	WARN_ON(size != (PAGE_SIZE << get_order(size)));
	hyp_split_page(hyp_virt_to_page(addr));

	return addr;
}

static void guest_s2_free_pages_exact(void *addr, unsigned long size)
{
	u8 order = get_order(size);
	unsigned int i;

	for (i = 0; i < (1 << order); i++)
		hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
}

static void *guest_s2_zalloc_page(void *mc)
{
	struct hyp_page *p;
	void *addr;

	addr = hyp_alloc_pages(&current_vm->pool, 0);
	if (addr)
		return addr;

	addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
	if (!addr)
		return addr;

	memset(addr, 0, PAGE_SIZE);
	p = hyp_virt_to_page(addr);
	p->refcount = 1;
	p->order = 0;

	return addr;
}

static void guest_s2_get_page(void *addr)
{
	hyp_get_page(&current_vm->pool, addr);
}

static void guest_s2_put_page(void *addr)
{
	hyp_put_page(&current_vm->pool, addr);
}

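/*
 * Guest pages are not permanently mapped at EL2, so map them through the
 * per-CPU fixmap (or the larger fixblock for PMD-aligned chunks) one window
 * at a time and run @func on each mapping.
 */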
static void __apply_guest_page(void *va, size_t size,
			       void (*func)(void *addr, size_t size))
{
	size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
	va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
	size = PAGE_ALIGN(size);

	while (size) {
		size_t map_size = PAGE_SIZE;
		void *map;

		if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE)
			map = hyp_fixblock_map(__hyp_pa(va), &map_size);
		else
			map = hyp_fixmap_map(__hyp_pa(va));

		func(map, map_size);

		if (map_size == PMD_SIZE)
			hyp_fixblock_unmap();
		else
			hyp_fixmap_unmap();

		size -= map_size;
		va += map_size;
	}
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__apply_guest_page(va, size, __clean_dcache_guest_page);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__apply_guest_page(va, size, __invalidate_icache_guest_page);
}

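/*
 * Set up the stage-2 page-table of a non-protected guest: the pages backing
 * @pgd are turned into a per-VM allocation pool and the page-table is
 * instantiated with the guest_s2_* callbacks above.
 */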
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
{
	struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
	unsigned long nr_pages;
	int ret;

	nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
	if (ret)
		return ret;

	hyp_spin_lock_init(&vm->lock);
	vm->mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = guest_s2_zalloc_pages_exact,
		.free_pages_exact = guest_s2_free_pages_exact,
		.zalloc_page = guest_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = guest_s2_get_page,
		.put_page = guest_s2_put_page,
		.dcache_clean_inval_poc = clean_dcache_guest_page,
		.icache_inval_pou = invalidate_icache_guest_page,
	};

	guest_lock_component(vm);
	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
	guest_unlock_component(vm);
	if (ret)
		return ret;

	vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);

	return 0;
}

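/*
 * Tear down a guest stage-2 and give all of its page-table pages back to the
 * host: destroy the page-table, then drain the per-VM pool into the
 * host-provided memcache, donating each page back as we go.
 */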
void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
	struct hyp_page *page;
	void *addr;

	/* Dump all pgtable pages in the hyp_pool */
	guest_lock_component(vm);
	kvm_pgtable_stage2_destroy(&vm->pgt);
	vm->kvm.arch.mmu.pgd_phys = 0ULL;
	guest_unlock_component(vm);

	/* Drain the hyp_pool into the memcache */
	addr = hyp_alloc_pages(&vm->pool, 0);
	while (addr) {
		page = hyp_virt_to_page(addr);
		page->refcount = 0;
		page->order = 0;
		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
		addr = hyp_alloc_pages(&vm->pool, 0);
	}
}

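/*
 * Per-CPU finalisation of host stage-2 protection: program VTTBR/VTCR, set
 * HCR_EL2.VM (and FWB where supported) and invalidate any stale TLB entries.
 * Returns -EPERM if stage-2 is already enabled on this CPU.
 */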
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = mmu->vtcr;
	params->hcr_el2 |= HCR_VM;
	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		params->hcr_el2 |= HCR_FWB;

	/*
	 * The CMO below not only cleans the updated params to the
	 * PoC, but also provides the DSB that ensures ongoing
	 * page-table walks that have started before we trapped to EL2
	 * have completed.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg_hcr(params->hcr_el2);
	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_mmu.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

/*
 * Ensure the PFN range is contained within PA-range.
 *
 * This check is also robust to overflows and is therefore a requirement before
 * using a pfn/nr_pages pair from an untrusted source.
 */
static bool pfn_range_is_valid(u64 pfn, u64 nr_pages)
{
	u64 limit = BIT(kvm_phys_shift(&host_mmu.arch.mmu) - PAGE_SHIFT);

	return pfn < limit && ((limit - pfn) >= nr_pages);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

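/*
 * Binary-search the sorted hyp_memory memblock array for @addr. On a hit,
 * @range is set to the enclosing memory region; on a miss it is set to the
 * hole around @addr and NULL is returned.
 */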
static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return reg;
		}
	}

	return NULL;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return !!find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static int check_range_allowed_memory(u64 start, u64 end)
{
	struct memblock_region *reg;
	struct kvm_mem_range range;

	/*
	 * Callers can't check the state of a range that overlaps memory and
	 * MMIO regions, so ensure [start, end[ is in the same kvm_mem_range.
	 */
	reg = find_mem_range(start, &range);
	if (!is_in_mem_range(end - 1, &range))
		return -EINVAL;

	if (!reg || reg->flags & MEMBLOCK_NOMAP)
		return -EPERM;

	return 0;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
				      prot, &host_s2_pool, 0);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_mmu.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	})

static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

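/*
 * Narrow @range to the largest block-mapping-aligned region around @addr that
 * still fits within it, so the fault handler can install the biggest possible
 * mapping in one go. Fails with -EAGAIN if a mapping already exists, or
 * -EPERM if the entry carries an ownership annotation.
 */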
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u64 granule;
	s8 level;
	int ret;

	hyp_assert_lock_held(&host_mmu.lock);
	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte) {
		WARN_ON(addr_is_memory(addr) &&
			get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
		return -EPERM;
	}

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) {
		if (!kvm_level_supports_block_mapping(level))
			continue;
		granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		if (!range_included(&cur, range))
			continue;
		*range = cur;
		return 0;
	}

	WARN_ON(1);

	return -EINVAL;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, addr, size)
		set_host_state(page, state);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	int ret;

	if (!range_is_memory(addr, addr + size))
		return -EPERM;

	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
			      addr, size, &host_s2_pool, owner_id);
	if (ret)
		return ret;

	/* Don't forget to update the vmemmap tracking for the host */
	if (owner_id == PKVM_ID_HOST)
		__host_update_page_state(addr, size, PKVM_PAGE_OWNED);
	else
		__host_update_page_state(addr, size, PKVM_NOPAGE);

	return 0;
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding to lose the state because of side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = !!find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

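/*
 * Host stage-2 fault handler: host mappings are created lazily, so a stage-2
 * abort here normally just means the faulting address has not been
 * identity-mapped yet. Resolve the fault IPA and map the largest possible
 * range around it; anything else is fatal.
 */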
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	if (!__get_fault_info(esr, &fault)) {
		/*
		 * We've presumably raced with a page-table change which caused
		 * AT to fail, try again.
		 */
		return;
	}

	/*
	 * Yikes, we couldn't resolve the fault IPA. This should reinject an
	 * abort into the host when we figure out how to do that.
	 */
	BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
	addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;

	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

struct check_walk_data {
	enum pkvm_page_state	desired;
	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte, u64 addr);
};

static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
				      enum kvm_pgtable_walk_flags visit)
{
	struct check_walk_data *d = ctx->arg;

	return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __check_page_state_visitor,
		.arg	= data,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	int ret;

	ret = check_range_allowed_memory(addr, addr + size);
	if (ret)
		return ret;

	hyp_assert_lock_held(&host_mmu.lock);

	for_each_hyp_page(page, addr, size) {
		if (get_host_state(page) != state)
			return -EPERM;
	}

	return 0;
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
		int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);

		if (ret)
			return ret;
	}

	__host_update_page_state(addr, size, state);

	return 0;
}

static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, phys, size)
		set_hyp_state(page, state);
}

static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, phys, size) {
		if (get_hyp_state(page) != state)
			return -EPERM;
	}

	return 0;
}

static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
					  u64 size, enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= guest_get_page_state,
	};

	hyp_assert_lock_held(&vm->lock);
	return check_page_state_range(&vm->pgt, addr, size, &d);
}

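/*
 * Page ownership transitions between the host and the hypervisor. Each helper
 * checks the current state of the page(s) under both component locks before
 * updating the host and hyp vmemmap state (and, for donations, the mappings
 * themselves).
 */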
int __pkvm_host_share_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 virt = (u64)__hyp_va(phys);
	u64 size = PAGE_SIZE;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;
	if (hyp_page_count((void *)virt)) {
		ret = -EBUSY;
		goto unlock;
	}

	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	void *virt = __hyp_va(phys);
	int ret;

	if (!pfn_range_is_valid(pfn, nr_pages))
		return -EINVAL;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	u64 virt = (u64)__hyp_va(phys);
	int ret;

	if (!pfn_range_is_valid(pfn, nr_pages))
		return -EINVAL;

	host_lock_component();
	hyp_lock_component();

	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int hyp_pin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	u64 phys = __hyp_pa(start);
	u64 size = end - start;
	struct hyp_page *p;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;

	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		p = hyp_virt_to_page(cur);
		hyp_page_ref_inc(p);
		if (p->refcount == 1)
			WARN_ON(pkvm_create_mappings_locked((void *)cur,
							    (void *)cur + PAGE_SIZE,
							    PAGE_HYP));
	}

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

void hyp_unpin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	struct hyp_page *p;

	host_lock_component();
	hyp_lock_component();

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		p = hyp_virt_to_page(cur);
		if (p->refcount == 1)
			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
		hyp_page_ref_dec(p);
	}

	hyp_unlock_component();
	host_unlock_component();
}

int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	int ret;

	if (!pfn_range_is_valid(pfn, nr_pages))
		return -EINVAL;

	host_lock_component();
	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	int ret;

	if (!pfn_range_is_valid(pfn, nr_pages))
		return -EINVAL;

	host_lock_component();
	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	host_unlock_component();

	return ret;
}

static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
{
	size_t block_size;

	if (nr_pages == 1) {
		*size = PAGE_SIZE;
		return 0;
	}

	/* We solely support second to last level huge mapping */
	block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);

	if (nr_pages != block_size >> PAGE_SHIFT)
		return -EINVAL;

	if (!IS_ALIGNED(phys | ipa, block_size))
		return -EINVAL;

	*size = block_size;
	return 0;
}

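/*
 * Share host pages with a non-protected guest: the host keeps ownership
 * (PKVM_PAGE_SHARED_OWNED) and the guest gets a SHARED_BORROWED mapping at
 * @gfn. A page may be shared multiple times, tracked by
 * host_share_guest_count.
 */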
int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 ipa = hyp_pfn_to_phys(gfn);
	u64 size;
	int ret;

	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	if (!pfn_range_is_valid(pfn, nr_pages))
		return -EINVAL;

	ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
	if (ret)
		return ret;

	ret = check_range_allowed_memory(phys, phys + size);
	if (ret)
		return ret;

	host_lock_component();
	guest_lock_component(vm);

	ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	for_each_hyp_page(page, phys, size) {
		switch (get_host_state(page)) {
		case PKVM_PAGE_OWNED:
			continue;
		case PKVM_PAGE_SHARED_OWNED:
			if (page->host_share_guest_count == U32_MAX) {
				ret = -EBUSY;
				goto unlock;
			}

			/* Only host to np-guest multi-sharing is tolerated */
			if (page->host_share_guest_count)
				continue;

			fallthrough;
		default:
			ret = -EPERM;
			goto unlock;
		}
	}

	for_each_hyp_page(page, phys, size) {
		set_host_state(page, PKVM_PAGE_SHARED_OWNED);
		page->host_share_guest_count++;
	}

	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys,
				       pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
				       &vcpu->vcpu.arch.pkvm_memcache, 0));

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size)
{
	enum pkvm_page_state state;
	kvm_pte_t pte;
	u64 phys;
	s8 level;
	int ret;

	ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
	if (ret)
		return ret;
	if (!kvm_pte_valid(pte))
		return -ENOENT;
	if (size && kvm_granule_size(level) != size)
		return -E2BIG;

	if (!size)
		size = kvm_granule_size(level);

	state = guest_get_page_state(pte, ipa);
	if (state != PKVM_PAGE_SHARED_BORROWED)
		return -EPERM;

	phys = kvm_pte_to_phys(pte);
	ret = check_range_allowed_memory(phys, phys + size);
	if (WARN_ON(ret))
		return ret;

	for_each_hyp_page(page, phys, size) {
		if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
			return -EPERM;
		if (WARN_ON(!page->host_share_guest_count))
			return -EINVAL;
	}

	*__phys = phys;

	return 0;
}

int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
	u64 ipa = hyp_pfn_to_phys(gfn);
	u64 size, phys;
	int ret;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	host_lock_component();
	guest_lock_component(vm);

	ret = __check_host_shared_guest(vm, &phys, ipa, size);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size);
	if (ret)
		goto unlock;

	for_each_hyp_page(page, phys, size) {
		/* __check_host_shared_guest() protects against underflow */
		page->host_share_guest_count--;
		if (!page->host_share_guest_count)
			set_host_state(page, PKVM_PAGE_OWNED);
	}

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size)
{
	u64 phys;
	int ret;

	if (!IS_ENABLED(CONFIG_NVHE_EL2_DEBUG))
		return;

	host_lock_component();
	guest_lock_component(vm);

	ret = __check_host_shared_guest(vm, &phys, ipa, size);

	guest_unlock_component(vm);
	host_unlock_component();

	WARN_ON(ret && ret != -ENOENT);
}

int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	assert_host_shared_guest(vm, ipa, 0);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
	u64 size, ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	assert_host_shared_guest(vm, ipa, size);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm)
{
	u64 size, ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	assert_host_shared_guest(vm, ipa, size);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 ipa = hyp_pfn_to_phys(gfn);

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	assert_host_shared_guest(vm, ipa, 0);
	guest_lock_component(vm);
	kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
	guest_unlock_component(vm);

	return 0;
}

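/*
 * Build-time debug selftest exercising the ownership transitions above on a
 * single page, checking the tracked host/hyp/guest state after every step.
 */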
#ifdef CONFIG_NVHE_EL2_DEBUG
struct pkvm_expected_state {
	enum pkvm_page_state host;
	enum pkvm_page_state hyp;
	enum pkvm_page_state guest[2]; /* [ gfn, gfn + 1 ] */
};

static struct pkvm_expected_state selftest_state;
static struct hyp_page *selftest_page;

static struct pkvm_hyp_vm selftest_vm = {
	.kvm = {
		.arch = {
			.mmu = {
				.arch = &selftest_vm.kvm.arch,
				.pgt = &selftest_vm.pgt,
			},
		},
	},
};

static struct pkvm_hyp_vcpu selftest_vcpu = {
	.vcpu = {
		.arch = {
			.hw_mmu = &selftest_vm.kvm.arch.mmu,
		},
		.kvm = &selftest_vm.kvm,
	},
};

static void init_selftest_vm(void *virt)
{
	struct hyp_page *p = hyp_virt_to_page(virt);
	int i;

	selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
	WARN_ON(kvm_guest_prepare_stage2(&selftest_vm, virt));

	for (i = 0; i < pkvm_selftest_pages(); i++) {
		if (p[i].refcount)
			continue;
		p[i].refcount = 1;
		hyp_put_page(&selftest_vm.pool, hyp_page_to_virt(&p[i]));
	}
}

static u64 selftest_ipa(void)
{
	return BIT(selftest_vm.pgt.ia_bits - 1);
}

static void assert_page_state(void)
{
	void *virt = hyp_page_to_virt(selftest_page);
	u64 size = PAGE_SIZE << selftest_page->order;
	struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
	u64 phys = hyp_virt_to_phys(virt);
	u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
	struct pkvm_hyp_vm *vm;

	vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);

	host_lock_component();
	WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
	host_unlock_component();

	hyp_lock_component();
	WARN_ON(__hyp_check_page_state_range(phys, size, selftest_state.hyp));
	hyp_unlock_component();

	guest_lock_component(&selftest_vm);
	WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0]));
	WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1]));
	guest_unlock_component(&selftest_vm);
}

#define assert_transition_res(res, fn, ...)				\
	do {								\
		WARN_ON(fn(__VA_ARGS__) != res);			\
		assert_page_state();					\
	} while (0)

void pkvm_ownership_selftest(void *base)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_RWX;
	void *virt = hyp_alloc_pages(&host_s2_pool, 0);
	struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
	struct pkvm_hyp_vm *vm = &selftest_vm;
	u64 phys, size, pfn, gfn;

	WARN_ON(!virt);
	selftest_page = hyp_virt_to_page(virt);
	selftest_page->refcount = 0;
	init_selftest_vm(base);

	size = PAGE_SIZE << selftest_page->order;
	phys = hyp_virt_to_phys(virt);
	pfn = hyp_phys_to_pfn(phys);
	gfn = hyp_phys_to_pfn(selftest_ipa());

	selftest_state.host = PKVM_NOPAGE;
	selftest_state.hyp = PKVM_PAGE_OWNED;
	selftest_state.guest[0] = selftest_state.guest[1] = PKVM_NOPAGE;
	assert_page_state();
	assert_transition_res(-EPERM,	__pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-EPERM,	hyp_pin_shared_mem, virt, virt + size);
	assert_transition_res(-EPERM,	__pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT,	__pkvm_host_unshare_guest, gfn, 1, vm);

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0,	__pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-ENOENT,	__pkvm_host_unshare_guest, gfn, 1, vm);
	assert_transition_res(-EPERM,	hyp_pin_shared_mem, virt, virt + size);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.hyp = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0,	__pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT,	__pkvm_host_unshare_guest, gfn, 1, vm);

	assert_transition_res(0,	hyp_pin_shared_mem, virt, virt + size);
	assert_transition_res(0,	hyp_pin_shared_mem, virt, virt + size);
	hyp_unpin_shared_mem(virt, virt + size);
	WARN_ON(hyp_page_count(virt) != 1);
	assert_transition_res(-EBUSY,	__pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT,	__pkvm_host_unshare_guest, gfn, 1, vm);

	hyp_unpin_shared_mem(virt, virt + size);
	assert_page_state();
	WARN_ON(hyp_page_count(virt));

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0,	__pkvm_host_unshare_hyp, pfn);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0,	__pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT,	__pkvm_host_unshare_guest, gfn, 1, vm);
	assert_transition_res(-EPERM,	hyp_pin_shared_mem, virt, virt + size);

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0,	__pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_ffa, pfn, 1);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0,	__pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-EPERM,	__pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-EPERM,	__pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM,	__pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM,	__pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM,	hyp_pin_shared_mem, virt, virt + size);

	selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0,	__pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
	WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);

	selftest_state.guest[0] = PKVM_NOPAGE;
	assert_transition_res(0,	__pkvm_host_unshare_guest, gfn, 1, vm);

	selftest_state.guest[1] = PKVM_NOPAGE;
	selftest_state.host = PKVM_PAGE_OWNED;
	assert_transition_res(0,	__pkvm_host_unshare_guest, gfn + 1, 1, vm);

	selftest_state.host = PKVM_NOPAGE;
	selftest_state.hyp = PKVM_PAGE_OWNED;
	assert_transition_res(0,	__pkvm_host_donate_hyp, pfn, 1);

	selftest_page->refcount = 1;
	hyp_put_page(&host_s2_pool, virt);
}
#endif