GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/nvhe/mm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

struct hyp_fixmap_slot {
        u64 addr;
        kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

static int __pkvm_create_mappings(unsigned long start, unsigned long size,
                                  unsigned long phys, enum kvm_pgtable_prot prot)
{
        int err;

        hyp_spin_lock(&pkvm_pgd_lock);
        err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return err;
}

static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
{
        unsigned long cur;

        hyp_assert_lock_held(&pkvm_pgd_lock);

        if (!start || start < __io_map_base)
                return -EINVAL;

        /* The allocated size is always a multiple of PAGE_SIZE */
        cur = start + PAGE_ALIGN(size);

        /* Are we overflowing on the vmemmap ? */
        if (cur > __hyp_vmemmap)
                return -ENOMEM;

        __io_map_base = cur;

        return 0;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size: The size of the VA range to reserve.
 * @haddr: The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
        unsigned long addr;
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);
        addr = __io_map_base;
        ret = __pkvm_alloc_private_va_range(addr, size);
        hyp_spin_unlock(&pkvm_pgd_lock);

        *haddr = addr;

        return ret;
}

int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
                                  enum kvm_pgtable_prot prot,
                                  unsigned long *haddr)
{
        unsigned long addr;
        int err;

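        /*
         * Illustrative arithmetic (editor's example, assuming 4KiB pages):
         * with phys == 0x40001234 and size == 0x100, offset_in_page(phys)
         * is 0x234, so size below becomes PAGE_ALIGN(0x334) == 0x1000 (one
         * page) and *haddr is returned as addr + 0x234.
         */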
        size = PAGE_ALIGN(size + offset_in_page(phys));
        err = pkvm_alloc_private_va_range(size, &addr);
        if (err)
                return err;

        err = __pkvm_create_mappings(addr, size, phys, prot);
        if (err)
                return err;

        *haddr = addr + offset_in_page(phys);
        return err;
}

int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        unsigned long virt_addr;
        phys_addr_t phys;

        hyp_assert_lock_held(&pkvm_pgd_lock);

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys = hyp_virt_to_phys((void *)virt_addr);
                err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
                                          phys, prot);
                if (err)
                        return err;
        }

        return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);
        ret = pkvm_create_mappings_locked(from, to, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return ret;
}

int hyp_back_vmemmap(phys_addr_t back)
{
        unsigned long i, start, size, end = 0;
        int ret;

        for (i = 0; i < hyp_memblock_nr; i++) {
                start = hyp_memory[i].base;
                start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
                /*
                 * The beginning of the hyp_vmemmap region for the current
                 * memblock may already be backed by the page backing the end
                 * of the previous region, so avoid mapping it twice.
                 */
                start = max(start, end);

                end = hyp_memory[i].base + hyp_memory[i].size;
                end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
                if (start >= end)
                        continue;

                size = end - start;
                ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
                if (ret)
                        return ret;

                memset(hyp_phys_to_virt(back), 0, size);
                back += size;
        }

        return 0;
}

static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
        void *vector;

        switch (slot) {
        case HYP_VECTOR_DIRECT: {
                vector = __kvm_hyp_vector;
                break;
        }
        case HYP_VECTOR_SPECTRE_DIRECT: {
                vector = __bp_harden_hyp_vecs;
                break;
        }
        case HYP_VECTOR_INDIRECT:
        case HYP_VECTOR_SPECTRE_INDIRECT: {
                vector = (void *)__hyp_bp_vect_base;
                break;
        }
        default:
                return -EINVAL;
        }

        vector = __kvm_vector_slot2addr(vector, slot);
        *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

        return 0;
}

int hyp_map_vectors(void)
{
        phys_addr_t phys;
        unsigned long bp_base;
        int ret;

        if (!kvm_system_needs_idmapped_vectors()) {
                __hyp_bp_vect_base = __bp_harden_hyp_vecs;
                return 0;
        }

        phys = __hyp_pa(__bp_harden_hyp_vecs);
        ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
                                            PAGE_HYP_EXEC, &bp_base);
        if (ret)
                return ret;

        __hyp_bp_vect_base = (void *)bp_base;

        return 0;
}

static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys)
{
        kvm_pte_t pte, *ptep = slot->ptep;

        pte = *ptep;
        pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
        pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
        WRITE_ONCE(*ptep, pte);
        dsb(ishst);

        return (void *)slot->addr;
}

void *hyp_fixmap_map(phys_addr_t phys)
{
        return fixmap_map_slot(this_cpu_ptr(&fixmap_slots), phys);
}

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
        kvm_pte_t *ptep = slot->ptep;
        u64 addr = slot->addr;
        u32 level;

        if (FIELD_GET(KVM_PTE_TYPE, *ptep) == KVM_PTE_TYPE_PAGE)
                level = KVM_PGTABLE_LAST_LEVEL;
        else
                level = KVM_PGTABLE_LAST_LEVEL - 1; /* create_fixblock() guarantees PMD level */

        WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);

        /*
         * Irritatingly, the architecture requires that we use inner-shareable
         * broadcast TLB invalidation here in case another CPU speculates
         * through our fixmap and decides to create an "amalgamation of the
         * values held in the TLB" due to the apparent lack of a
         * break-before-make sequence.
         *
         * https://lore.kernel.org/kvm/[email protected]/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
         */
        dsb(ishst);
        __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
        dsb(ish);
        isb();
}

void hyp_fixmap_unmap(void)
{
        fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}
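
/*
 * Illustrative usage sketch (editor's addition, not from the upstream file):
 * the per-CPU fixmap provides a single temporary mapping per hypervisor CPU,
 * so a caller maps a physical page, accesses it, and unmaps it again before
 * mapping anything else on that CPU, e.g.:
 *
 *      void *va = hyp_fixmap_map(phys);
 *      memset(va, 0, PAGE_SIZE);
 *      hyp_fixmap_unmap();
 */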

static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
                                   enum kvm_pgtable_walk_flags visit)
{
        struct hyp_fixmap_slot *slot = (struct hyp_fixmap_slot *)ctx->arg;

        if (!kvm_pte_valid(ctx->old) || (ctx->end - ctx->start) != kvm_granule_size(ctx->level))
                return -EINVAL;

        slot->addr = ctx->addr;
        slot->ptep = ctx->ptep;

        /*
         * Clear the PTE, but keep the page-table page refcount elevated to
         * prevent it from ever being freed. This lets us manipulate the PTEs
         * by hand safely without ever needing to allocate memory.
         */
        fixmap_clear_slot(slot);

        return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
        struct kvm_pgtable_walker walker = {
                .cb = __create_fixmap_slot_cb,
                .flags = KVM_PGTABLE_WALK_LEAF,
                .arg = per_cpu_ptr(&fixmap_slots, cpu),
        };

        return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

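/*
 * Editorial note on the guard below: a PMD-level block is 2MiB with 4KiB
 * pages and 32MiB with 16KiB pages, but would be 512MiB with 64KiB pages
 * (PAGE_SHIFT == 16), presumably too large to dedicate to a fixmap, so the
 * block-sized slot is only provided for the smaller granules.
 */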
#if PAGE_SHIFT < 16
#define HAS_FIXBLOCK
static struct hyp_fixmap_slot hyp_fixblock_slot;
static DEFINE_HYP_SPINLOCK(hyp_fixblock_lock);
#endif

static int create_fixblock(void)
{
#ifdef HAS_FIXBLOCK
        struct kvm_pgtable_walker walker = {
                .cb = __create_fixmap_slot_cb,
                .flags = KVM_PGTABLE_WALK_LEAF,
                .arg = &hyp_fixblock_slot,
        };
        unsigned long addr;
        phys_addr_t phys;
        int ret, i;

        /* Find a RAM phys address, PMD aligned */
        for (i = 0; i < hyp_memblock_nr; i++) {
                phys = ALIGN(hyp_memory[i].base, PMD_SIZE);
                if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size))
                        break;
        }

        if (i >= hyp_memblock_nr)
                return -EINVAL;

        hyp_spin_lock(&pkvm_pgd_lock);
        addr = ALIGN(__io_map_base, PMD_SIZE);
        ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE);
        if (ret)
                goto unlock;

        ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP);
        if (ret)
                goto unlock;

        ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker);

unlock:
        hyp_spin_unlock(&pkvm_pgd_lock);

        return ret;
#else
        return 0;
#endif
}

void *hyp_fixblock_map(phys_addr_t phys, size_t *size)
{
#ifdef HAS_FIXBLOCK
        *size = PMD_SIZE;
        hyp_spin_lock(&hyp_fixblock_lock);
        return fixmap_map_slot(&hyp_fixblock_slot, phys);
#else
        *size = PAGE_SIZE;
        return hyp_fixmap_map(phys);
#endif
}

void hyp_fixblock_unmap(void)
{
#ifdef HAS_FIXBLOCK
        fixmap_clear_slot(&hyp_fixblock_slot);
        hyp_spin_unlock(&hyp_fixblock_lock);
#else
        hyp_fixmap_unmap();
#endif
}

int hyp_create_fixmap(void)
{
        unsigned long addr, i;
        int ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
                if (ret)
                        return ret;

                ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
                                          __hyp_pa(__hyp_bss_start), PAGE_HYP);
                if (ret)
                        return ret;

                ret = create_fixmap_slot(addr, i);
                if (ret)
                        return ret;
        }

        return create_fixblock();
}

int hyp_create_idmap(u32 hyp_va_bits)
{
        unsigned long start, end;

        start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
        start = ALIGN_DOWN(start, PAGE_SIZE);

        end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
        end = ALIGN(end, PAGE_SIZE);

        /*
         * One half of the VA space is reserved to linearly map portions of
         * memory -- see va_layout.c for more details. The other half of the VA
         * space contains the trampoline page, and needs some care. Split that
         * second half in two and find the quarter of VA space not conflicting
         * with the idmap to place the IOs and the vmemmap. IOs use the lower
         * half of the quarter and the vmemmap the upper half.
         */
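        /*
         * Illustrative example (editor's addition, assuming hyp_va_bits == 48):
         * BIT(46) is the quarter-select bit. The two lines below keep only
         * bit 46 of the idmap's start address and then flip it, so
         * __io_map_base differs from the idmap in that bit and falls in a
         * quarter the idmap does not use; __hyp_vmemmap then marks the upper
         * half of that quarter via BIT(45), with the IO range growing up
         * towards it from __io_map_base.
         */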
        __io_map_base = start & BIT(hyp_va_bits - 2);
        __io_map_base ^= BIT(hyp_va_bits - 2);
        __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

        return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
{
        unsigned long addr, prev_base;
        size_t size;
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);

        prev_base = __io_map_base;
        /*
         * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
         * an alignment of our allocation on the order of the size.
         */
        size = NVHE_STACK_SIZE * 2;
        addr = ALIGN(__io_map_base, size);

        ret = __pkvm_alloc_private_va_range(addr, size);
        if (!ret) {
                /*
                 * Since the stack grows downwards, map the stack to the page
                 * at the higher address and leave the lower guard page
                 * unbacked.
                 *
                 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
                 * and addresses corresponding to the guard page have the
                 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
                 */
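                /*
                 * Illustrative layout (editor's example, assuming
                 * NVHE_STACK_SIZE == PAGE_SIZE == 4KiB): addr is 8KiB-aligned,
                 * [addr, addr + 4KiB) stays unmapped as the guard page and
                 * [addr + 4KiB, addr + 8KiB) is backed by @phys; *haddr
                 * returned below is addr + 8KiB, the (descending) top of the
                 * new stack.
                 */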
                ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
                                          NVHE_STACK_SIZE, phys, PAGE_HYP);
                if (ret)
                        __io_map_base = prev_base;
        }
        hyp_spin_unlock(&pkvm_pgd_lock);

        *haddr = addr + size;

        return ret;
}

static void *admit_host_page(void *arg)
{
        struct kvm_hyp_memcache *host_mc = arg;

        if (!host_mc->nr_pages)
                return NULL;

        /*
         * The host still owns the pages in its memcache, so we need to go
         * through a full host-to-hyp donation cycle to change it. Fortunately,
         * __pkvm_host_donate_hyp() takes care of races for us, so if it
         * succeeds we're good to go.
         */
        if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
                return NULL;

        return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
                    struct kvm_hyp_memcache *host_mc)
{
        struct kvm_hyp_memcache tmp = *host_mc;
        int ret;

        ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
                                   hyp_virt_to_phys, &tmp);
        *host_mc = tmp;

        return ret;
}