GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/coco/sev/noinstr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <[email protected]>
 */

#define pr_fmt(fmt) "SEV: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>

#include <asm/cpu_entry_area.h>
#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/sev.h>

#include "internal.h"

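/* Check whether regs->sp points into this CPU's #VC IST stack. */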
static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested __sev_es_ist_exit() call may adjust the IST entry back too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

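/*
 * Unroll the IST adjustment done in __sev_es_ist_enter(): the previous
 * IST value was stored in the 8 bytes below the current entry, so read
 * it back and write it to the TSS.
 */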
void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

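/*
 * Tell the hypervisor that the guest has finished handling an NMI by
 * issuing an NMI_COMPLETE VMGEXIT, so the hypervisor knows NMI
 * processing is done (and can, for example, inject further NMIs).
 */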
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

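	/*
	 * Callers always work on the primary GHCB page; state->ghcb only
	 * records whether a backup must be restored in __sev_put_ghcb().
	 */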
	return ghcb;
}

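/*
 * Release the per-CPU GHCB acquired with __sev_get_ghcb(): either
 * restore the backed-up contents, or invalidate the page and mark it
 * free again.
 */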
noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}