GitHub Repository: torvalds/linux
Path: blob/master/virt/kvm/pfncache.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end)
{
	struct gfn_to_pfn_cache *gpc;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		read_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			read_unlock_irq(&gpc->lock);

			/*
			 * There is a small window here where the cache could
			 * be modified, and invalidation would no longer be
			 * necessary. Hence check again whether invalidation
			 * is still necessary once the write lock has been
			 * acquired.
			 */

			write_lock_irq(&gpc->lock);
			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
			    gpc->uhva >= start && gpc->uhva < end)
				gpc->valid = false;
			write_unlock_irq(&gpc->lock);
			continue;
		}

		read_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
}
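
/*
 * Illustrative sketch (not part of this file): the lock-upgrade idiom used
 * above, in isolation. rwlock_t has no atomic read-to-write upgrade, so the
 * read lock is dropped and the write lock taken, after which the condition
 * must be re-checked because another task may have changed the state in
 * between. All names here are hypothetical.
 */
#if 0
struct example_cache {
	rwlock_t lock;
	bool valid;
	unsigned long addr;
};

static void example_invalidate(struct example_cache *c, unsigned long start,
			       unsigned long end)
{
	read_lock_irq(&c->lock);
	if (c->valid && c->addr >= start && c->addr < end) {
		read_unlock_irq(&c->lock);	/* cannot upgrade in place */

		write_lock_irq(&c->lock);
		/* Re-check: the state may have changed while unlocked. */
		if (c->valid && c->addr >= start && c->addr < end)
			c->valid = false;
		write_unlock_irq(&c->lock);
		return;
	}
	read_unlock_irq(&c->lock);
}
#endif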

static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
				 unsigned long len)
{
	unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
						       offset_in_page(gpa);

	/*
	 * The cached access must fit within a single page. The 'len' argument
	 * to activate() and refresh() exists only to enforce that.
	 */
	return offset + len <= PAGE_SIZE;
}
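
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 0x1000): a GPA-based
 * cache at gpa 0x1f00 has offset_in_page() == 0xf00, so len == 0x100 passes
 * (0xf00 + 0x100 == PAGE_SIZE, i.e. the access ends exactly at the page
 * boundary) while len == 0x101 fails because it would straddle two pages.
 */
#if 0
	kvm_gpc_is_valid_len(0x1f00, KVM_HVA_ERR_BAD, 0x100);	/* true  */
	kvm_gpc_is_valid_len(0x1f00, KVM_HVA_ERR_BAD, 0x101);	/* false */
#endif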

bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	/*
	 * If the page was cached from a memslot, make sure the memslots have
	 * not been re-configured.
	 */
	if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
		return false;

	if (kvm_is_error_hva(gpc->uhva))
		return false;

	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
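
/*
 * Illustrative usage sketch (not part of this file), modeled on how KVM's
 * callers pair check() and refresh(): gpc->khva may only be dereferenced
 * under gpc->lock while kvm_gpc_check() returns true, and a failed check is
 * handled by refreshing outside the lock and retrying. The function name is
 * hypothetical.
 */
#if 0
static void example_read_guest_u64(struct gfn_to_pfn_cache *gpc, u64 *val)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(*val))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(*val)))
			return;	/* refresh failed; cache unusable */

		read_lock_irqsave(&gpc->lock, flags);
	}

	*val = *(u64 *)gpc->khva;	/* safe while the lock is held */
	read_unlock_irqrestore(&gpc->lock, flags);
}
#endif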

static void *gpc_map(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));

#ifdef CONFIG_HAS_IOMEM
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
	return NULL;
#endif
}

static void gpc_unmap(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (is_error_noslot_pfn(pfn) || !khva)
		return;

	if (pfn_valid(pfn)) {
		kunmap(pfn_to_page(pfn));
		return;
	}

#ifdef CONFIG_HAS_IOMEM
	memunmap(khva);
#endif
}
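
/*
 * Illustrative sketch (not part of this file): the pair above is symmetric,
 * i.e. a pfn mapped with kmap() is released with kunmap() and a memremap()ed
 * pfn with memunmap(); pfn_valid() distinguishes struct-page-backed memory
 * from e.g. MMIO. The function name is hypothetical.
 */
#if 0
static void example_map_once(kvm_pfn_t pfn)
{
	void *khva = gpc_map(pfn);

	if (khva) {
		/* ... access the page through khva ... */
		gpc_unmap(pfn, khva);
	}
}
#endif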

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock. It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq. This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}
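
/*
 * Illustrative sketch (not part of this file) of how the sequence count is
 * consumed: snapshot mmu_invalidate_seq before resolving the pfn, then retry
 * if an invalidation was in flight or the sequence moved. This is exactly
 * the shape of the loop in hva_to_pfn_retry() below; the function name is
 * hypothetical.
 */
#if 0
static void example_retry_shape(struct kvm *kvm)
{
	unsigned long mmu_seq;

	do {
		mmu_seq = kvm->mmu_invalidate_seq;
		smp_rmb();	/* pairs with smp_wmb() in the notifier path */

		/* ... drop locks, resolve hva to pfn, remap ... */
	} while (mmu_notifier_retry_cache(kvm, mmu_seq));
}
#endif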

static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;
	struct page *page;

	struct kvm_follow_pfn kfp = {
		.slot = gpc->memslot,
		.gfn = gpa_to_gfn(gpc->gpa),
		.flags = FOLL_WRITE,
		.hva = gpc->uhva,
		.refcounted_page = &page,
	};

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
	 * assets have already been updated and so a concurrent check() from a
	 * different task may not fail the gpa/uhva/generation checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt. Unmapping might sleep, so this
		 * needs to be done after dropping the lock. Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap(new_pfn, new_khva);

			kvm_release_page_unused(page);

			cond_resched();
		}

		new_pfn = hva_to_pfn(&kfp);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn. Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (new_pfn == gpc->pfn)
			new_khva = old_khva;
		else
			new_khva = gpc_map(new_pfn);

		if (!new_khva) {
			kvm_release_page_unused(page);
			goto out_error;
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + offset_in_page(gpc->uhva);

	/*
	 * Put the reference to the _new_ page. The page is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_page_clean(page);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}

static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
	unsigned long page_offset;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	bool hva_change = false;
	void *old_khva;
	int ret;

	/* Either gpa or uhva must be valid, but not both */
	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
		return -EINVAL;

	lockdep_assert_held(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

	if (kvm_is_error_gpa(gpa)) {
		page_offset = offset_in_page(uhva);

		gpc->gpa = INVALID_GPA;
		gpc->memslot = NULL;
		gpc->uhva = PAGE_ALIGN_DOWN(uhva);

		if (gpc->uhva != old_uhva)
			hva_change = true;
	} else {
		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

		page_offset = offset_in_page(gpa);

		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
		    kvm_is_error_hva(gpc->uhva)) {
			gfn_t gfn = gpa_to_gfn(gpa);

			gpc->gpa = gpa;
			gpc->generation = slots->generation;
			gpc->memslot = __gfn_to_memslot(slots, gfn);
			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

			if (kvm_is_error_hva(gpc->uhva)) {
				ret = -EFAULT;
				goto out;
			}

			/*
			 * Even if the GPA and/or the memslot generation changed, the
			 * HVA may still be the same.
			 */
			if (gpc->uhva != old_uhva)
				hva_change = true;
		} else {
			gpc->uhva = old_uhva;
		}
	}

	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
	gpc->uhva += page_offset;

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || hva_change) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/*
		 * If the HVA→PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid, leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	if (unmap_old)
		gpc_unmap(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	unsigned long uhva;

	guard(mutex)(&gpc->refresh_lock);

	if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
		return -EINVAL;

	/*
	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
	 * or HVA-based, not both. For GPA-based caches, the HVA will be
	 * recomputed during refresh if necessary.
	 */
	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;

	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}
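
/*
 * Illustrative sketch (not part of this file); a hypothetical caller
 * re-establishes an invalidated mapping, e.g. before re-entering the guest.
 * Refresh must be called from a sleepable context without gpc->lock held:
 * it takes gpc->refresh_lock internally and hva_to_pfn_retry() can sleep.
 */
#if 0
static int example_ensure_mapped(struct gfn_to_pfn_cache *gpc,
				 unsigned long len)
{
	unsigned long flags;
	bool ok;

	read_lock_irqsave(&gpc->lock, flags);
	ok = kvm_gpc_check(gpc, len);
	read_unlock_irqrestore(&gpc->lock, flags);

	return ok ? 0 : kvm_gpc_refresh(gpc, len);
}
#endif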

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->gpa = INVALID_GPA;
	gpc->uhva = KVM_HVA_ERR_BAD;
	gpc->active = gpc->valid = false;
}

static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
			      unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	if (!kvm_gpc_is_valid_len(gpa, uhva, len))
		return -EINVAL;

	guard(mutex)(&gpc->refresh_lock);

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list, a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, uhva);
}

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	/*
	 * Explicitly disallow INVALID_GPA so that the magic value can be used
	 * by KVM to differentiate between GPA-based and HVA-based caches.
	 */
	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
		return -EINVAL;

	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}

int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
{
	if (!access_ok((void __user *)uhva, len))
		return -EINVAL;

	return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}
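
/*
 * Illustrative sketch (not part of this file): a cache is activated either
 * by guest physical address or by host virtual address, never both; the
 * INVALID_GPA/KVM_HVA_ERR_BAD magic values encode which flavor is in use.
 * 'struct some_guest_struct' is hypothetical.
 */
#if 0
	/* GPA-based: tracks memslot changes via the slots generation. */
	ret = kvm_gpc_activate(gpc, gpa, sizeof(struct some_guest_struct));

	/* HVA-based: pinned to a userspace address, no memslot coupling. */
	ret = kvm_gpc_activate_hva(gpc, uhva, sizeof(struct some_guest_struct));
#endif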

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	guard(mutex)(&gpc->refresh_lock);

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list, KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA cache intact, it's protected by the
		 * memslot generation. The PFN lookup needs to be redone every
		 * time as mmu_notifier protection is lost when the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap(old_pfn, old_khva);
	}
}
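
/*
 * Illustrative end-to-end lifecycle (not part of this file); all names other
 * than the kvm_gpc_* API are hypothetical. init() prepares the locks,
 * activate() binds the cache and makes it visible to mmu_notifier events,
 * check()/refresh() gate every access, and deactivate() tears it all down.
 */
#if 0
static int example_use_gpc(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			   gpa_t gpa)
{
	unsigned long flags;
	int ret;

	kvm_gpc_init(gpc, kvm);

	ret = kvm_gpc_activate(gpc, gpa, sizeof(u64));
	if (ret)
		return ret;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(u64))) {
		read_unlock_irqrestore(&gpc->lock, flags);
		ret = kvm_gpc_refresh(gpc, sizeof(u64));
		if (ret)
			goto out;
		read_lock_irqsave(&gpc->lock, flags);
	}
	*(u64 *)gpc->khva = 0;	/* write guest memory via the mapping */
	read_unlock_irqrestore(&gpc->lock, flags);

out:
	kvm_gpc_deactivate(gpc);
	return ret;
}
#endif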