GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR	0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception.  AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or INTR,
 * but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading and much more
 * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
 */
#define ERROR_CODE_EXT_FLAG	BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG	BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD   ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE 0

#define INTERCEPT_SS		(BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF		(INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF	(INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))
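
/*
 * L2 guest bodies.  Each stub GUEST_SYNCs the vector L2 is "asking" for, i.e.
 * the exception L1 expects to intercept after host userspace queues a #SS on
 * the next run (FAKE_TRIPLE_FAULT_VECTOR is used when the expected end result
 * is a nested SHUTDOWN/TRIPLE_FAULT instead of an intercepted exception).
 */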
static void l2_ss_pending_test(void)
{
        GUEST_SYNC(SS_VECTOR);
}

static void l2_ss_injected_gp_test(void)
{
        GUEST_SYNC(GP_VECTOR);
}

static void l2_ss_injected_df_test(void)
{
        GUEST_SYNC(DF_VECTOR);
}

static void l2_ss_injected_tf_test(void)
{
        GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}
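
/*
 * Run L2 at @l2_code via VMRUN and, unless the expected result is the fake
 * triple fault, assert that L1 got an exception intercept #VMEXIT for @vector
 * with @error_code and with no guest interruptibility state.
 */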
static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
                       uint32_t error_code)
{
        struct vmcb *vmcb = svm->vmcb;
        struct vmcb_control_area *ctrl = &vmcb->control;

        vmcb->save.rip = (u64)l2_code;
        run_guest(vmcb, svm->vmcb_gpa);

        if (vector == FAKE_TRIPLE_FAULT_VECTOR)
                return;

        GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
        GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
        GUEST_ASSERT(!ctrl->int_state);
}
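
/*
 * L1 (SVM) body: give L2 an empty IDT, intercept SHUTDOWN, then step through
 * the scenarios, progressively relaxing the exception intercepts so that the
 * queued #SS escalates from an intercepted #SS, to #GP, to #DF, and finally
 * to a SHUTDOWN #VMEXIT.
 */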
static void l1_svm_code(struct svm_test_data *svm)
{
        struct vmcb_control_area *ctrl = &svm->vmcb->control;
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        svm->vmcb->save.idtr.limit = 0;
        ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

        ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
        svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
        svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

        ctrl->intercept_exceptions = INTERCEPT_SS_DF;
        svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

        ctrl->intercept_exceptions = INTERCEPT_SS;
        svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
        GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

        GUEST_DONE();
}
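
/*
 * Run L2 at @l2_code via VMLAUNCH (first entry, i.e. the #SS scenario) or
 * VMRESUME and, unless the expected result is the fake triple fault, assert
 * that L1 saw an exception VM-Exit for @vector with @error_code and with no
 * guest interruptibility state.
 */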
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
        GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

        GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

        if (vector == FAKE_TRIPLE_FAULT_VECTOR)
                return;

        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
        GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
        GUEST_ASSERT(!vmreadz(GUEST_INTERRUPTIBILITY_INFO));
}
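
/*
 * L1 (VMX) body: mirror of l1_svm_code().  Zero L2's IDT limit and walk the
 * same #SS => #GP => #DF => TRIPLE_FAULT escalation by shrinking the
 * exception bitmap between runs.
 */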
static void l1_vmx_code(struct vmx_pages *vmx)
{
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

        GUEST_ASSERT_EQ(load_vmcs(vmx), true);

        prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

        /*
         * VMX disallows injecting an exception with error_code[31:16] != 0,
         * and hardware will never generate a VM-Exit with bits 31:16 set.
         * KVM should likewise truncate the "bad" userspace value.
         */
        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
        vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
        vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
        vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
        vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

        GUEST_DONE();
}
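
/* Common L1 entry point, dispatch to the SVM or VMX flavor of the test. */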
static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
        if (this_cpu_has(X86_FEATURE_SVM))
                l1_svm_code(test_data);
        else
                l1_vmx_code(test_data);
}
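
/*
 * Assert that the vCPU exited to host userspace with a ucall, and that the
 * ucall matches the expected step: a GUEST_SYNC for @vector, or GUEST_DONE
 * when @vector is -1.
 */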
static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
        struct ucall uc;

        TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

        switch (get_ucall(vcpu, &uc)) {
        case UCALL_SYNC:
                TEST_ASSERT(vector == uc.args[1],
                            "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
                break;
        case UCALL_DONE:
                TEST_ASSERT(vector == -1,
                            "Expected L2 to ask for %d, L2 says it's done", vector);
                break;
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        default:
                TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
        }
}
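
/*
 * Queue a #SS with SS_ERROR_CODE for the vCPU via KVM_SET_VCPU_EVENTS, as
 * either a pending or an injected exception per @inject, after verifying
 * that no other exception is already pending/injected.
 */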
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
        struct kvm_vcpu_events events;

        vcpu_events_get(vcpu, &events);

        TEST_ASSERT(!events.exception.pending,
                    "Vector %d unexpectedly pending", events.exception.nr);
        TEST_ASSERT(!events.exception.injected,
                    "Vector %d unexpectedly injected", events.exception.nr);

        events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = !inject;
        events.exception.injected = inject;
        events.exception.nr = SS_VECTOR;
        events.exception.has_error_code = true;
        events.exception.error_code = SS_ERROR_CODE;
        vcpu_events_set(vcpu, &events);
}

/*
 * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
 * when an exception is being queued for L2.  Specifically, verify that KVM
 * honors L1 exception intercept controls when a #SS is pending/injected,
 * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
 * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
 */
int main(int argc, char *argv[])
{
        vm_vaddr_t nested_test_data_gva;
        struct kvm_vcpu_events events;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
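
        /*
         * Editor's note: KVM_CAP_EXCEPTION_PAYLOAD is enabled below so that
         * pending (not just injected) exceptions and their error codes can be
         * round-tripped through KVM_{G,S}ET_VCPU_EVENTS; the -2ul passed to
         * vm_enable_cap() is presumably just an arbitrary non-zero "enable"
         * value.
         */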
        vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
        vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

        if (kvm_cpu_has(X86_FEATURE_SVM))
                vcpu_alloc_svm(vm, &nested_test_data_gva);
        else
                vcpu_alloc_vmx(vm, &nested_test_data_gva);

        vcpu_args_set(vcpu, 1, nested_test_data_gva);

        /* Run L1 => L2.  L2 should sync and request #SS. */
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, SS_VECTOR);

        /* Pend #SS and request immediate exit.  #SS should still be pending. */
        queue_ss_exception(vcpu, false);
        vcpu->run->immediate_exit = true;
        vcpu_run_complete_io(vcpu);

        /* Verify the pending event comes back out the same as it went in. */
        vcpu_events_get(vcpu, &events);
        TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
                       KVM_VCPUEVENT_VALID_PAYLOAD);
        TEST_ASSERT_EQ(events.exception.pending, true);
        TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
        TEST_ASSERT_EQ(events.exception.has_error_code, true);
        TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

        /*
         * Run for real with the pending #SS, L1 should get a VM-Exit due to
         * #SS interception and re-enter L2 to request #GP (via injected #SS).
         */
        vcpu->run->immediate_exit = false;
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, GP_VECTOR);

        /*
         * Inject #SS, the #SS should bypass interception and cause #GP, which
         * L1 should intercept before KVM morphs it to #DF.  L1 should then
         * disable #GP interception and run L2 to request #DF (via #SS => #GP).
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, DF_VECTOR);

        /*
         * Inject #SS, the #SS should bypass interception and cause #GP, which
         * L1 is no longer intercepting, and so should see a #DF VM-Exit.  L1
         * should then disable #DF interception and run L2 to request a (fake)
         * triple fault (via #SS => #GP => #DF).
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

        /*
         * Inject #SS yet again.  L1 is not intercepting #GP or #DF, and so
         * should see nested TRIPLE_FAULT / SHUTDOWN, at which point it should
         * signal that it is done.
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, -1);

        kvm_vm_free(vm);
}