GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/kvm_cache_regs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
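/*
 * Illustration (added for this writeup, not part of the original header):
 * each invocation below stamps out a read/write pair for one GPR.  For
 * example, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to:
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * Note these accessors touch vcpu->arch.regs directly with no avail/dirty
 * bookkeeping, presumably because GPRs other than RSP and RIP are saved and
 * restored unconditionally across VM-Entry/VM-Exit, so the cached copy is
 * always current.
 */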
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * Using the register cache from interrupt context is generally not allowed, as
 * caching a register and marking it available/dirty can't be done atomically,
 * i.e. accesses from interrupt context may clobber state or read stale data if
 * the vCPU task is in the process of updating the cache.  The exception is if
 * KVM is handling a PMI IRQ/NMI VM-Exit, as that bound code sequence doesn't
 * touch the cache, it runs after the cache is reset (post VM-Exit), and PMIs
 * need to access several registers that are cacheable.
 */
#define kvm_assert_register_caching_allowed(vcpu)		\
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))
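/*
 * Example (illustrative, not from the original source): a kvm_rip_read() from
 * an arbitrary IRQ handler runs with in_task() false and no guest PMI being
 * handled, so lockdep_assert_once() would fire; the same read from the vCPU
 * task's normal exit-handling path, or from the PMI handler for a PMI that
 * arrived in the guest, is permitted.
 */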

/*
 * avail  dirty
 *   0      0	  register in VMCS/VMCB
 *   0      1	  *INVALID*
 *   1      0	  register in vcpu->arch
 *   1      1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
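/*
 * Illustrative lifecycle (derived from the table above, not spelled out in
 * the original source): after VM-Exit the cache is reset, i.e. avail=0/dirty=0
 * and the architectural value lives only in the VMCS/VMCB.  Filling the cache
 * on a read moves a register to avail=1/dirty=0; an emulated write moves it
 * to avail=1/dirty=1, telling vendor code to propagate vcpu->arch back to the
 * VMCS/VMCB before the next VM-Entry.  avail=0/dirty=1 is invalid because
 * kvm_register_mark_dirty() always sets both bits.
 */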

/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
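/*
 * Hypothetical usage sketch (not from the original source; the value name is
 * made up): a noinstr VM-Exit path that has just pulled a register out of
 * hardware can publish it to the cache without KASAN/KCSAN instrumentation:
 *
 *	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_CR3))
 *		vcpu->arch.cr3 = hardware_cr3_value;
 *
 * The helper returns the previous avail bit, so the cached copy is filled in
 * only when the register wasn't already cached.
 */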

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode. In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
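/*
 * For contrast, a sketch of the mode-aware counterpart (modeled on
 * kvm_register_read() in arch/x86/kvm/x86.h; shown for illustration and may
 * differ by kernel version).  Outside 64-bit mode the value is truncated to
 * 32 bits, which is why the raw variants above are rarely the right choice:
 *
 *	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 *						      int reg)
 *	{
 *		unsigned long val = kvm_register_read_raw(vcpu, reg);
 *
 *		return is_64_bit_mode(vcpu) ? val : (u32)val;
 *	}
 */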

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
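/*
 * Note (added for clarity, not in the original source): the PDPTRs are the
 * four page-directory-pointer-table entries used by PAE paging.  The
 * might_sleep() above reflects that refilling the cache on SVM may require
 * reading the PDPTEs from guest memory, which can fault and sleep, whereas
 * VMX can pull them straight out of the VMCS guest-state fields.
 */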

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}
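/*
 * Example (illustrative, not from the original source): CR0.TS is in
 * KVM_POSSIBLE_CR0_GUEST_BITS, so if the vCPU currently owns TS the guest may
 * have flipped it without exiting, and kvm_read_cr0_bits(vcpu, X86_CR0_TS)
 * must refresh vcpu->arch.cr0 from the VMCS/VMCB.  Reading a bit that can
 * never be guest-owned, e.g. CR0.PG, skips the refresh entirely because the
 * shadow value cannot be stale.
 */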

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
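/*
 * Hypothetical inverse helper (added for illustration; the name is made up
 * and is not part of this header): splitting a 64-bit value back across
 * EDX:EAX, in the style KVM uses when delivering RDMSR/RDTSC results.
 */
static inline void kvm_write_edx_eax(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_rax_write(vcpu, (u32)val);		/* low 32 bits -> EAX */
	kvm_rdx_write(vcpu, (u32)(val >> 32));	/* high 32 bits -> EDX */
}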

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}
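/*
 * Note (added for clarity, not in the original source): "guest mode" here
 * means nested virtualization, i.e. the vCPU is running an L2 guest on behalf
 * of a nested hypervisor.  An EOI-exitmap update that arrives while L2 is
 * active is recorded in load_eoi_exitmap_pending and replayed via
 * KVM_REQ_LOAD_EOI_EXITMAP once the vCPU returns to L1, as seen in
 * leave_guest_mode() above.
 */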

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif