GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/coco/sev/internal.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_COCO_SEV_INTERNAL_H__
#define __X86_COCO_SEV_INTERNAL_H__
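
/* Architectural reset value of the DR7 debug control register */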
#define DR7_RESET_VALUE 0x400

extern u64 sev_hv_features;
extern u64 sev_secrets_pa;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};
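
/*
 * Cookie passed between __sev_get_ghcb() and __sev_put_ghcb(): records which
 * GHCB was handed out so the put side can release (and, when nested, restore)
 * the right one.
 */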
struct ghcb_state {
	struct ghcb *ghcb;
};

extern struct svsm_ca boot_svsm_ca_page;
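
/*
 * Acquire/release the per-CPU GHCB.  Typical usage:
 *
 *	struct ghcb_state state;
 *	struct ghcb *ghcb = __sev_get_ghcb(&state);
 *	...use ghcb...
 *	__sev_put_ghcb(&state);
 *
 * If the GHCB is already active (nested #VC, see struct sev_es_runtime_data
 * above), its contents are saved to backup_ghcb and restored on the put.
 */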
struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
void __sev_put_ghcb(struct ghcb_state *state);

DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
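
/*
 * Early page state change: transition @npages starting at @vaddr/@paddr
 * between shared and private as described by @desc.
 */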
void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
			    unsigned long npages, const struct psc_desc *desc);

DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
DECLARE_PER_CPU(u64, svsm_caa_pa);

extern u64 boot_svsm_caa_pa;

enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
void vc_forward_exception(struct es_em_ctxt *ctxt);
void svsm_pval_pages(struct snp_psc_desc *desc);
int svsm_perform_call_protocol(struct svsm_call *call);
bool snp_svsm_vtpm_probe(void);
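
/* Raw accessors for the GHCB MSR (MSR_AMD64_SEV_ES_GHCB) */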
static inline u64 sev_es_rd_ghcb_msr(void)
{
	return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
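
/*
 * Emulate an MSR access that raised a #VC exception, using @ghcb to talk to
 * the hypervisor; @write distinguishes WRMSR from RDMSR.
 */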
enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);

u64 get_hv_features(void);

const struct snp_cpuid_table *snp_cpuid_get_table(void);
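
/*
 * Return the SVSM Calling Area to use for SVSM calls (svsm_get_caa_pa()
 * returns its physical address): the per-CPU CA once sev_cfg.use_cas is set,
 * otherwise the boot CA.
 */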
static inline struct svsm_ca *svsm_get_caa(void)
{
	if (sev_cfg.use_cas)
		return this_cpu_read(svsm_caa);
	else
		return rip_rel_ptr(&boot_svsm_ca_page);
}

static inline u64 svsm_get_caa_pa(void)
{
	if (sev_cfg.use_cas)
		return this_cpu_read(svsm_caa_pa);
	else
		return boot_svsm_caa_pa;
}
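
/* Report a PVALIDATE failure and terminate the guest via the GHCB protocol */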
static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
				    int ret, u64 svsm_ret)
{
	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
	     pfn, action, page_size, ret, svsm_ret);

	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}

#endif /* __X86_COCO_SEV_INTERNAL_H__ */