GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/mmu/paging_tmpl.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <[email protected]>
 *   Avi Kivity <[email protected]>
 */

/*
 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
 * as well as guest EPT tables, so the code in this file is compiled thrice,
 * once per guest PTE type. The per-type defines are #undef'd at the end.
 */

#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_LEVEL_BITS 9
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_LEVEL_BITS 10
#define PT_MAX_FULL_LEVELS 2
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
#elif PTTYPE == PTTYPE_EPT
#define pt_element_t u64
#define guest_walker guest_walkerEPT
#define FNAME(name) ept_##name
#define PT_LEVEL_BITS 9
#define PT_GUEST_DIRTY_SHIFT 9
#define PT_GUEST_ACCESSED_SHIFT 8
#define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
#error Invalid PTTYPE value
#endif
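
/*
 * Illustrative sketch (the exact include site is assumed, not defined here):
 * mmu.c instantiates this template once per guest PTE type, roughly as
 * below, so FNAME(page_fault) becomes ept_page_fault, paging64_page_fault
 * and paging32_page_fault respectively.
 *
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */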

/* Common logic, but per-type values. These also need to be undefined. */
#define PT_BASE_ADDR_MASK ((pt_element_t)__PT_BASE_ADDR_MASK)
#define PT_LVL_ADDR_MASK(lvl) __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_LVL_OFFSET_MASK(lvl) __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_INDEX(addr, lvl) __PT_INDEX(addr, lvl, PT_LEVEL_BITS)

#define PT_GUEST_DIRTY_MASK (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
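
/*
 * A worked example of the index math, assuming the usual __PT_INDEX()
 * definition where the shift for level N is PAGE_SHIFT + (N - 1) * PT_LEVEL_BITS:
 *
 *	PT_LEVEL_BITS == 9:  PT_INDEX(addr, 1) == (addr >> 12) & 0x1ff
 *	                     PT_INDEX(addr, 2) == (addr >> 21) & 0x1ff
 *	PT_LEVEL_BITS == 10: PT_INDEX(addr, 2) == (addr >> 22) & 0x3ff
 *
 * i.e. each level consumes PT_LEVEL_BITS of the address above the 4K page
 * offset.
 */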

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        unsigned max_level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        bool pte_writable[PT_MAX_FULL_LEVELS];
        unsigned int pt_access[PT_MAX_FULL_LEVELS];
        unsigned int pte_access;
        gfn_t gfn;
        struct x86_exception fault;
};
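
/*
 * Indexing sketch, assuming a full 4-level IA-32e walk down to a 4K page:
 * max_level == 4, level == 1, ptes[3]..ptes[0] hold the PML4E, PDPTE, PDE
 * and PTE, and table_gfn[N - 1] is the gfn of the table the level-N entry
 * was read from. A walk that terminates early at a huge page leaves the
 * lower slots unused.
 */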

#if PTTYPE == 32
static inline gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
#endif
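
/*
 * Worked example, assuming the usual PSE-36 layout: shift is
 * 32 - 13 - 12 == 7, so PDE bits 16:13 land in gfn bits 23:20 and supply
 * physical address bits 35:32 of the 4M page frame. A PDE with only
 * bit 13 set therefore adds 1 << 20 to the gfn (physical bit 32).
 */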

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
                                             unsigned gpte)
{
        unsigned mask;

        /* dirty bit is not supported, so no need to track it */
        if (!PT_HAVE_ACCESSED_DIRTY(mmu))
                return;

        BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

        mask = (unsigned)~ACC_WRITE_MASK;
        /* Allow write access to dirty gptes */
        mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
                PT_WRITABLE_MASK;
        *access &= mask;
}
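
/*
 * Worked example of the shift above, assuming the non-EPT layout where
 * PT_GUEST_DIRTY_SHIFT == 6 and PT_WRITABLE_SHIFT == 1:
 *
 *	dirty gpte (bit 6 set):   mask gains PT_WRITABLE_MASK, so *access
 *				  keeps ACC_WRITE_MASK
 *	clean gpte (bit 6 clear): mask == ~ACC_WRITE_MASK, so *access loses
 *				  ACC_WRITE_MASK
 *
 * The same trick works for EPT with PT_GUEST_DIRTY_SHIFT == 9.
 */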

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
        return pte & PT_PRESENT_MASK;
#else
        return pte & 7;
#endif
}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
        return false;
#else
        return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
        return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
               FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *sp, u64 *spte,
                                         u64 gpte)
{
        if (!FNAME(is_present_gpte)(gpte))
                goto no_present;

        /* Prefetch only accessed entries (unless A/D bits are disabled). */
        if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
            !(gpte & PT_GUEST_ACCESSED_MASK))
                goto no_present;

        if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
                goto no_present;

        return false;

no_present:
        drop_spte(vcpu->kvm, spte);
        return true;
}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
        unsigned access;
#if PTTYPE == PTTYPE_EPT
        access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
                ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
                ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
        BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
        BUILD_BUG_ON(ACC_EXEC_MASK != 1);
        access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
        /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
        access ^= (gpte >> PT64_NX_SHIFT);
#endif

        return access;
}
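
/*
 * Worked example for the non-EPT path above, where ACC_EXEC_MASK ==
 * PT_PRESENT_MASK == bit 0 and PT64_NX_SHIFT == 63:
 *
 *	present gpte, NX clear: gpte >> 63 == 0, bit 0 stays set
 *				-> executable
 *	present gpte, NX set:	gpte >> 63 == 1, the XOR clears bit 0
 *				-> not executable
 */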

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
                                             gpa_t addr, int write_fault)
{
        unsigned level, index;
        pt_element_t pte, orig_pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        int ret;

        /* dirty/accessed bits are not supported, so no need to update them */
        if (!PT_HAVE_ACCESSED_DIRTY(mmu))
                return 0;

        for (level = walker->max_level; level >= walker->level; --level) {
                pte = orig_pte = walker->ptes[level - 1];
                table_gfn = walker->table_gfn[level - 1];
                ptep_user = walker->ptep_user[level - 1];
                index = offset_in_page(ptep_user) / sizeof(pt_element_t);
                if (!(pte & PT_GUEST_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_GUEST_ACCESSED_MASK;
                }
                if (level == walker->level && write_fault &&
                    !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
                        if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
                                return -EINVAL;
#endif
                        pte |= PT_GUEST_DIRTY_MASK;
                }
                if (pte == orig_pte)
                        continue;

                /*
                 * If the slot is read-only, simply do not process the accessed
                 * and dirty bits. This is the correct thing to do if the slot
                 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
                 * are only supported if the accessed and dirty bits are already
                 * set in the ROM (so that MMIO writes are never needed).
                 *
                 * Note that NPT does not allow this at all and faults, since
                 * it always wants nested page table entries for the guest
                 * page tables to be writable. And EPT works but will simply
                 * overwrite the read-only memory to set the accessed and dirty
                 * bits.
                 */
                if (unlikely(!walker->pte_writable[level - 1]))
                        continue;

                ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
                if (ret)
                        return ret;

                kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
                walker->ptes[level - 1] = pte;
        }
        return 0;
}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
        unsigned pkeys = 0;
#if PTTYPE == 64
        pte_t pte = {.pte = gpte};

        pkeys = pte_flags_pkey(pte_flags(pte));
#endif
        return pkeys;
}

static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
                                       unsigned int level, unsigned int gpte)
{
        /*
         * For EPT and PAE paging (both variants), bit 7 is either reserved at
         * all levels or indicates a huge page (ignoring CR3/EPTP). In either
         * case, bit 7 being set terminates the walk.
         */
#if PTTYPE == 32
        /*
         * 32-bit paging requires special handling because bit 7 is ignored if
         * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is
         * greater than the last level for which bit 7 is the PAGE_SIZE bit.
         *
         * The RHS has bit 7 set iff level < (2 + PSE). If it is clear, bit 7
         * is not reserved and does not indicate a large page at this level,
         * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
         */
        gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_role.ext.cr4_pse);
#endif
        /*
         * PG_LEVEL_4K always terminates. The RHS has bit 7 set
         * iff level <= PG_LEVEL_4K, which for our purpose means
         * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
         */
        gpte |= level - PG_LEVEL_4K - 1;

        return gpte & PT_PAGE_SIZE_MASK;
}
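
/*
 * Worked example of the unsigned arithmetic above for 32-bit paging
 * (PT32_ROOT_LEVEL == 2, PG_LEVEL_4K == 1):
 *
 *	level == 2, CR4.PSE == 1: gpte &= 2 - 3 == 0xffffffff, bit 7 is
 *				  kept, so a PDE with PS set is a leaf.
 *	level == 2, CR4.PSE == 0: gpte &= 2 - 2 == 0, bit 7 is ignored and
 *				  the walk continues to the PTE.
 *	level == 1:		  gpte |= 1 - 1 - 1 == 0xffffffff, so a 4K
 *				  PTE always terminates the walk.
 */
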
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gpa_t addr, u64 access)
{
        int ret;
        pt_element_t pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        u64 pt_access, pte_access;
        unsigned index, accessed_dirty, pte_pkey;
        u64 nested_access;
        gpa_t pte_gpa;
        bool have_ad;
        int offset;
        u64 walk_nx_mask = 0;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;
        u16 errcode = 0;
        gpa_t real_gpa;
        gfn_t gfn;

        trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
        walker->level = mmu->cpu_role.base.level;
        pte = kvm_mmu_get_guest_pgd(vcpu, mmu);
        have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
        walk_nx_mask = 1ULL << PT64_NX_SHIFT;
        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!FNAME(is_present_gpte)(pte))
                        goto error;
                --walker->level;
        }
#endif
        walker->max_level = walker->level;

        /*
         * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
         * by the MOV to CR instruction are treated as reads and do not cause the
         * processor to set the dirty flag in any EPT paging-structure entry.
         */
        nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

        pte_access = ~0;

        /*
         * Queue a page fault for injection if this assertion fails, as callers
         * assume that walker.fault contains sane info on a walk failure. I.e.
         * avoid making the situation worse by inducing even worse badness
         * between when the assertion fails and when KVM kicks the vCPU out to
         * userspace (because the VM is bugged).
         */
        if (KVM_BUG_ON(is_long_mode(vcpu) && !is_pae(vcpu), vcpu->kvm))
                goto error;

        ++walker->level;

        do {
                struct kvm_memory_slot *slot;
                unsigned long host_addr;

                pt_access = pte_access;
                --walker->level;

                index = PT_INDEX(addr, walker->level);
                table_gfn = gpte_to_gfn(pte);
                offset = index * sizeof(pt_element_t);
                pte_gpa = gfn_to_gpa(table_gfn) + offset;

                BUG_ON(walker->level < 1);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
                                             nested_access, &walker->fault);

                /*
                 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
                 * instruction) triggers a nested page fault. The exit
                 * qualification / exit info field will incorrectly have
                 * "guest page access" as the nested page fault's cause,
                 * instead of "guest page structure access". To fix this,
                 * the x86_exception struct should be augmented with enough
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
                if (unlikely(real_gpa == INVALID_GPA))
                        return 0;

                slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(real_gpa));
                if (!kvm_is_visible_memslot(slot))
                        goto error;

                host_addr = gfn_to_hva_memslot_prot(slot, gpa_to_gfn(real_gpa),
                                                    &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;

                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
                if (unlikely(__get_user(pte, ptep_user)))
                        goto error;
                walker->ptep_user[walker->level - 1] = ptep_user;

                trace_kvm_mmu_paging_element(pte, walker->level);

                /*
                 * Inverting the NX bit lets us AND it like other
                 * permission bits.
                 */
                pte_access = pt_access & (pte ^ walk_nx_mask);

                if (unlikely(!FNAME(is_present_gpte)(pte)))
                        goto error;

                if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
                        errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }

                walker->ptes[walker->level - 1] = pte;

                /* Convert to ACC_*_MASK flags for struct guest_walker. */
                walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));

        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
        accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

        /* Convert to ACC_*_MASK flags for struct guest_walker. */
        walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
        errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
        if (unlikely(errcode))
                goto error;

        gfn = gpte_to_gfn_lvl(pte, walker->level);
        gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

#if PTTYPE == 32
        if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
                gfn += pse36_gfn_delta(pte);
#endif

        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
        if (real_gpa == INVALID_GPA)
                return 0;

        walker->gfn = real_gpa >> PAGE_SHIFT;

        if (!write_fault)
                FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
        else
                /*
                 * On a write fault, fold the dirty bit into accessed_dirty.
                 * For modes without A/D bit support, accessed_dirty will
                 * always be clear.
                 */
                accessed_dirty &= pte >>
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

        if (unlikely(!accessed_dirty)) {
                ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
                                                        addr, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
                        goto retry_walk;
        }

        return 1;

error:
        errcode |= write_fault | user_fault;
        if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
                errcode |= PFERR_FETCH_MASK;

        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
        /*
         * Use PFERR_RSVD_MASK in error_code to tell if an EPT
         * misconfiguration needs to be injected. The detection is
         * done by is_rsvd_bits_set() above.
         *
         * We set up the value of exit_qualification to inject:
         * [2:0] - Derived from the access bits. The exit_qualification might be
         *         out of date if it is serving an EPT misconfiguration.
         * [5:3] - Calculated by the page walk of the guest EPT page tables
         * [7:8] - Derived from [7:8] of real exit_qualification
         *
         * The other bits are set to 0.
         */
        if (!(errcode & PFERR_RSVD_MASK)) {
                walker->fault.exit_qualification = 0;

                if (write_fault)
                        walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
                if (user_fault)
                        walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ;
                if (fetch_fault)
                        walker->fault.exit_qualification |= EPT_VIOLATION_ACC_INSTR;

                /*
                 * Note, pte_access holds the raw RWX bits from the EPTE, not
                 * ACC_*_MASK flags!
                 */
                walker->fault.exit_qualification |= EPT_VIOLATION_RWX_TO_PROT(pte_access);
        }
#endif
        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
        walker->fault.async_page_fault = false;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
        return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
                                        access);
}

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                     u64 *spte, pt_element_t gpte)
{
        unsigned pte_access;
        gfn_t gfn;

        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                return false;

        gfn = gpte_to_gfn(gpte);
        pte_access = sp->role.access & FNAME(gpte_access)(gpte);
        FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

        return kvm_mmu_prefetch_sptes(vcpu, gfn, spte, 1, pte_access);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
{
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
        u64 mask;
        int r, index;

        if (level == PG_LEVEL_4K) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
                                &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
}
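
/*
 * Worked example for the PG_LEVEL_4K branch, assuming PTE_PREFETCH_NUM == 8
 * and 8-byte guest PTEs: mask == 8 * 8 - 1 == 63, so base_gpa is the start
 * of the naturally aligned 64-byte block containing pte_gpa, the atomic
 * read pulls in the whole block, and index picks the original gPTE back out
 * of prefetch_ptes[].
 */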

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
{
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;

        sp = sptep_to_sp(sptep);

        if (sp->role.level > PG_LEVEL_4K)
                return;

        /*
         * If addresses are being invalidated, skip prefetching to avoid
         * accidentally prefetching those addresses.
         */
        if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                if (spte == sptep)
                        continue;

                if (is_shadow_present_pte(*spte))
                        continue;

                if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i]))
                        break;
        }
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                        struct guest_walker *gw)
{
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned int direct_access, access;
        int top_level, ret;
        gfn_t base_gfn = fault->gfn;

        WARN_ON_ONCE(gw->gfn != base_gfn);
        direct_access = gw->pte_access;

        top_level = vcpu->arch.mmu->cpu_role.base.level;
        if (top_level == PT32E_ROOT_LEVEL)
                top_level = PT32_ROOT_LEVEL;
        /*
         * Verify that the top-level gpte is still there. Since the page
         * is a root page, it is either write protected (and cannot be
         * changed from now on) or it is invalid (in which case, we don't
         * really care if it changes underneath us after this point).
         */
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                return RET_PF_RETRY;

        if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
                return RET_PF_RETRY;

        /*
         * Load a new root and retry the faulting instruction in the extremely
         * unlikely scenario that the guest root gfn became visible between
         * loading a dummy root and handling the resulting page fault, e.g. if
         * userspace creates a memslot in the interim.
         */
        if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
                kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
                return RET_PF_RETRY;
        }

        for_each_shadow_entry(vcpu, fault->addr, it) {
                gfn_t table_gfn;

                clear_sp_write_flooding_count(it.sptep);
                if (it.level == gw->level)
                        break;

                table_gfn = gw->table_gfn[it.level - 2];
                access = gw->pt_access[it.level - 2];
                sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
                                          false, access);

                /*
                 * Synchronize the new page before linking it, as the CPU (KVM)
                 * is architecturally disallowed from inserting non-present
                 * entries into the TLB, i.e. the guest isn't required to flush
                 * the TLB when changing the gPTE from non-present to present.
                 *
                 * For PG_LEVEL_4K, kvm_mmu_find_shadow_page() has already
                 * synchronized the page via kvm_sync_page().
                 *
                 * For higher level pages, which cannot be unsync themselves
                 * but can have unsync children, synchronize via the slower
                 * mmu_sync_children(). If KVM needs to drop mmu_lock due to
                 * contention or to reschedule, instruct the caller to retry
                 * the #PF (mmu_sync_children() ensures forward progress will
                 * be made).
                 */
                if (sp != ERR_PTR(-EEXIST) && sp->unsync_children &&
                    mmu_sync_children(vcpu, sp, false))
                        return RET_PF_RETRY;

                /*
                 * Verify that the gpte in the page, which is now either
                 * write-protected or unsync, wasn't modified between the fault
                 * and acquiring mmu_lock. This needs to be done even when
                 * reusing an existing shadow page to ensure the information
                 * gathered by the walker matches the information stored in the
                 * shadow page (which could have been modified by a different
                 * vCPU even if the page was already linked). Holding mmu_lock
                 * prevents the shadow page from changing after this point.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        return RET_PF_RETRY;

                if (sp != ERR_PTR(-EEXIST))
                        link_shadow_page(vcpu, it.sptep, sp);

                if (fault->write && table_gfn == fault->gfn)
                        fault->write_fault_to_shadow_pgtable = true;
        }

        /*
         * Adjust the hugepage size _after_ resolving indirect shadow pages.
         * KVM doesn't support mapping hugepages into the guest for gfns that
         * are being shadowed by KVM, i.e. allocating a new shadow page may
         * affect the allowed hugepage size.
         */
        kvm_mmu_hugepage_adjust(vcpu, fault);

        trace_kvm_mmu_spte_requested(fault);

        for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
                /*
                 * We cannot overwrite existing page tables with an NX
                 * large page, as the leaf could be executable.
                 */
                if (fault->nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(fault, *it.sptep, it.level);

                base_gfn = gfn_round_for_level(fault->gfn, it.level);
                if (it.level == fault->goal_level)
                        break;

                validate_direct_spte(vcpu, it.sptep, direct_access);

                sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
                                          true, direct_access);
                if (sp == ERR_PTR(-EEXIST))
                        continue;

                link_shadow_page(vcpu, it.sptep, sp);
                if (fault->huge_page_disallowed)
                        account_nx_huge_page(vcpu->kvm, sp,
                                             fault->req_level >= it.level);
        }

        if (WARN_ON_ONCE(it.level != fault->goal_level))
                return -EFAULT;

        ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access,
                           base_gfn, fault->pfn, fault);
        if (ret == RET_PF_SPURIOUS)
                return ret;

        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
        return ret;
}

/*
 * Page fault handler. There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
        struct guest_walker walker;
        int r;

        WARN_ON_ONCE(fault->is_tdp);

        /*
         * Look up the guest pte for the faulting address.
         * If PFEC.RSVD is set, this is a shadow page fault.
         * The bit needs to be cleared before walking guest page tables.
         */
        r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
                             fault->error_code & ~PFERR_RSVD_MASK);

        /*
         * The page is not mapped by the guest. Let the guest handle it.
         */
        if (!r) {
                if (!fault->prefetch)
                        kvm_inject_emulated_page_fault(vcpu, &walker.fault);

                return RET_PF_RETRY;
        }

        fault->gfn = walker.gfn;
        fault->max_level = walker.level;
        fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

        if (page_fault_handle_page_track(vcpu, fault)) {
                shadow_page_table_clear_flood(vcpu, fault->addr);
                return RET_PF_WRITE_PROTECTED;
        }

        r = mmu_topup_memory_caches(vcpu, true);
        if (r)
                return r;

        r = kvm_mmu_faultin_pfn(vcpu, fault, walker.pte_access);
        if (r != RET_PF_CONTINUE)
                return r;

#if PTTYPE != PTTYPE_EPT
        /*
         * Treat the guest PTE protections as writable, supervisor-only if this
         * is a supervisor write fault and CR0.WP=0 (supervisor accesses ignore
         * PTE.W if CR0.WP=0). Don't change the access type for emulated MMIO,
         * otherwise KVM will cache incorrect access information in the SPTE.
         */
        if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
            !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
                walker.pte_access |= ACC_WRITE_MASK;
                walker.pte_access &= ~ACC_USER_MASK;

                /*
                 * If we converted a user page to a kernel page,
                 * so that the kernel can write to it when cr0.wp=0,
                 * then we should prevent the kernel from executing it
                 * if SMEP is enabled.
                 */
                if (is_cr4_smep(vcpu->arch.mmu))
                        walker.pte_access &= ~ACC_EXEC_MASK;
        }
#endif

        r = RET_PF_RETRY;
        write_lock(&vcpu->kvm->mmu_lock);

        if (is_page_fault_stale(vcpu, fault))
                goto out_unlock;

        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
        r = FNAME(fetch)(vcpu, fault, &walker);

out_unlock:
        kvm_mmu_finish_page_fault(vcpu, fault, r);
        write_unlock(&vcpu->kvm->mmu_lock);
        return r;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
        int offset = 0;

        WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);

        if (PTTYPE == 32)
                offset = sp->role.quadrant << SPTE_LEVEL_BITS;

        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
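
/*
 * Sketch of the quadrant math above for PTTYPE == 32, where a 1024-entry
 * guest table is shadowed by two 512-entry shadow pages (SPTE_LEVEL_BITS ==
 * 9): quadrant 0 covers gPTEs 0..511 and quadrant 1 covers gPTEs 512..1023,
 * so for quadrant 1 the returned GPA starts 512 * sizeof(pt_element_t) ==
 * 2048 bytes into the guest page.
 */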

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               gpa_t addr, u64 access,
                               struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = INVALID_GPA;
        int r;

#ifndef CONFIG_X86_64
        /* A 64-bit GVA should be impossible on 32-bit KVM. */
        WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
#endif

        r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= addr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

/*
 * Using the information in sp->shadowed_translation (kvm_mmu_page_get_gfn()) is
 * safe because SPTEs are protected by mmu_notifiers and memslot generations, so
 * the pfn for a given gfn can't change unless all SPTEs pointing to the gfn are
 * nuked first.
 *
 * Returns
 * < 0: failed to sync spte
 *   0: the spte is synced and no tlb flushing is required
 * > 0: the spte is synced and tlb flushing is required
 */
static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
{
        bool host_writable;
        gpa_t first_pte_gpa;
        u64 *sptep, spte;
        struct kvm_memory_slot *slot;
        unsigned pte_access;
        pt_element_t gpte;
        gpa_t pte_gpa;
        gfn_t gfn;

        if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE ||
                         !sp->shadowed_translation))
                return 0;

        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
        pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

        if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
                                       sizeof(pt_element_t)))
                return -1;

        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
                return 1;

        gfn = gpte_to_gfn(gpte);
        pte_access = sp->role.access;
        pte_access &= FNAME(gpte_access)(gpte);
        FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

        if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
                return 0;

        /*
         * Drop the SPTE if the new protections result in no effective
         * "present" bit or if the gfn is changing. The former case
         * only affects EPT with execute-only support with pte_access==0;
         * all other paging modes will create a read-only SPTE if
         * pte_access is zero.
         */
        if ((pte_access | shadow_present_mask) == SHADOW_NONPRESENT_VALUE ||
            gfn != kvm_mmu_page_get_gfn(sp, i)) {
                drop_spte(vcpu->kvm, &sp->spt[i]);
                return 1;
        }
        /*
         * Do nothing if the permissions are unchanged. The existing SPTE is
         * still valid, and prefetch_invalid_gpte() has verified that the A/D
         * bits are set in the "new" gPTE, i.e. there is no danger of missing
         * an A/D update due to A/D bits being set in the SPTE but not the
         * gPTE.
         */
        if (kvm_mmu_page_get_access(sp, i) == pte_access)
                return 0;

        /* Update the shadowed access bits in case they changed. */
        kvm_mmu_page_set_access(sp, i, pte_access);

        sptep = &sp->spt[i];
        spte = *sptep;
        host_writable = spte & shadow_host_writable_mask;
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        make_spte(vcpu, sp, slot, pte_access, gfn,
                  spte_to_pfn(spte), spte, true, true,
                  host_writable, &spte);

        /*
         * There is no need to mark the pfn dirty, as the new protections must
         * be a subset of the old protections, i.e. synchronizing a SPTE cannot
         * change the SPTE from read-only to writable.
         */
        return mmu_spte_update(sptep, spte);
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY