GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/vmx/tdx.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_TDX_H
#define __KVM_X86_VMX_TDX_H

#include "tdx_arch.h"
#include "tdx_errno.h"

#ifdef CONFIG_KVM_INTEL_TDX
#include "common.h"

void tdx_hardware_setup(void);
int tdx_bringup(void);
void tdx_cleanup(void);

extern bool enable_tdx;

/* TDX module hardware states. These follow the TDX module OP_STATEs. */
enum kvm_tdx_state {
        TD_STATE_UNINITIALIZED = 0,
        TD_STATE_INITIALIZED,
        TD_STATE_RUNNABLE,
};

struct kvm_tdx {
        struct kvm kvm;

        struct misc_cg *misc_cg;
        int hkid;
        enum kvm_tdx_state state;

        u64 attributes;
        u64 xfam;

        u64 tsc_offset;
        u64 tsc_multiplier;

        struct tdx_td td;

        /*
         * Scratch pointer used to pass the source page to tdx_mem_page_add().
         * Protected by slots_lock, and non-NULL only when mapping a private
         * pfn via tdx_gmem_post_populate().
         */
        struct page *page_add_src;

        /*
         * Prevent vCPUs from TD entry to ensure SEPT zap related SEAMCALLs do
         * not contend with tdh_vp_enter() and TDCALLs.
         * Set/unset is protected with kvm->mmu_lock.
         */
        bool wait_for_sept_zap;
};

/* TDX module vCPU states */
enum vcpu_tdx_state {
        VCPU_TD_STATE_UNINITIALIZED = 0,
        VCPU_TD_STATE_INITIALIZED,
};

struct vcpu_tdx {
        struct kvm_vcpu vcpu;
        struct vcpu_vt vt;
        u64 ext_exit_qualification;
        gpa_t exit_gpa;
        struct tdx_module_args vp_enter_args;

        struct tdx_vp vp;

        struct list_head cpu_list;

        u64 vp_enter_ret;

        enum vcpu_tdx_state state;

        u64 map_gpa_next;
        u64 map_gpa_end;
};

void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);
void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
                      u64 val, u64 err);

static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
{
        u64 err, data;

        err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
        if (unlikely(err)) {
                pr_err("TDH_MNG_RD[EXEC.0x%x] failed: 0x%llx\n", field, err);
                return 0;
        }
        return data;
}
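
/*
 * Example (illustrative, not part of the upstream file): read a 64-bit field
 * from the TDCS execution-controls class via TDH.MNG.RD:
 *
 *      u64 val = td_tdcs_exec_read64(kvm_tdx, field);
 *
 * Note that a zero return can mean either a zero field value or a failed
 * SEAMCALL (the error is only logged).
 */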

static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
{
#define VMCS_ENC_ACCESS_TYPE_MASK       0x1UL
#define VMCS_ENC_ACCESS_TYPE_FULL       0x0UL
#define VMCS_ENC_ACCESS_TYPE_HIGH       0x1UL
#define VMCS_ENC_ACCESS_TYPE(field)     ((field) & VMCS_ENC_ACCESS_TYPE_MASK)

        /* TDX is 64bit only. HIGH field isn't supported. */
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
                         VMCS_ENC_ACCESS_TYPE(field) == VMCS_ENC_ACCESS_TYPE_HIGH,
                         "Read/Write to TD VMCS *_HIGH fields not supported");

        BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);

#define VMCS_ENC_WIDTH_MASK     GENMASK(14, 13)
#define VMCS_ENC_WIDTH_16BIT    (0UL << 13)
#define VMCS_ENC_WIDTH_64BIT    (1UL << 13)
#define VMCS_ENC_WIDTH_32BIT    (2UL << 13)
#define VMCS_ENC_WIDTH_NATURAL  (3UL << 13)
#define VMCS_ENC_WIDTH(field)   ((field) & VMCS_ENC_WIDTH_MASK)

        /* TDX is 64bit only. i.e. natural width = 64bit. */
        BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
                         (VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_64BIT ||
                          VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_NATURAL),
                         "Invalid TD VMCS access for 64-bit field");
        BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
                         VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_32BIT,
                         "Invalid TD VMCS access for 32-bit field");
        BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
                         VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_16BIT,
                         "Invalid TD VMCS access for 16-bit field");
}

static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}

#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass)                         \
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx,  \
                                                        u32 field)             \
{                                                                               \
        u64 err, data;                                                          \
                                                                                \
        tdvps_##lclass##_check(field, bits);                                    \
        err = tdh_vp_rd(&tdx->vp, TDVPS_##uclass(field), &data);               \
        if (unlikely(err)) {                                                    \
                tdh_vp_rd_failed(tdx, #uclass, field, err);                     \
                return 0;                                                       \
        }                                                                       \
        return (u##bits)data;                                                   \
}                                                                               \
static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx,    \
                                                      u32 field, u##bits val)  \
{                                                                               \
        u64 err;                                                                \
                                                                                \
        tdvps_##lclass##_check(field, bits);                                    \
        err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), val,                  \
                        GENMASK_ULL(bits - 1, 0));                              \
        if (unlikely(err))                                                      \
                tdh_vp_wr_failed(tdx, #uclass, " = ", field, (u64)val, err);    \
}                                                                               \
static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx,   \
                                                       u32 field, u64 bit)     \
{                                                                               \
        u64 err;                                                                \
                                                                                \
        tdvps_##lclass##_check(field, bits);                                    \
        err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), bit, bit);            \
        if (unlikely(err))                                                      \
                tdh_vp_wr_failed(tdx, #uclass, " |= ", field, bit, err);        \
}                                                                               \
static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
                                                         u32 field, u64 bit)   \
{                                                                               \
        u64 err;                                                                \
                                                                                \
        tdvps_##lclass##_check(field, bits);                                    \
        err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), 0, bit);              \
        if (unlikely(err))                                                      \
                tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);       \
}
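
/*
 * Descriptive note (not in the upstream file): each instantiation of
 * TDX_BUILD_TDVPS_ACCESSORS(bits, CLASS, class) below expands to four
 * helpers. For example, TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs) generates
 * td_vmcs_read64(), td_vmcs_write64(), td_vmcs_setbit64() and
 * td_vmcs_clearbit64(), all operating on TDVPS_VMCS()-encoded fields via
 * TDH.VP.RD/TDH.VP.WR.
 */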

bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err);

TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);

TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
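
/*
 * Example usage (illustrative, assuming the GUEST_RIP field encoding from
 * <asm/vmx.h>): given a struct vcpu_tdx *tdx, the guest RIP held in the TD
 * VMCS can be read as:
 *
 *      u64 rip = td_vmcs_read64(tdx, GUEST_RIP);
 */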

#else
static inline int tdx_bringup(void) { return 0; }
static inline void tdx_cleanup(void) {}

#define enable_tdx      0

struct kvm_tdx {
        struct kvm kvm;
};

struct vcpu_tdx {
        struct kvm_vcpu vcpu;
};

static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
static inline int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { return 0; }

#endif

#endif