Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/include/asm/cpuid/api.h
26496 views
1
/* SPDX-License-Identifier: GPL-2.0 */
2
#ifndef _ASM_X86_CPUID_API_H
3
#define _ASM_X86_CPUID_API_H
4
5
#include <asm/cpuid/types.h>
6
7
#include <linux/build_bug.h>
8
#include <linux/types.h>
9
10
#include <asm/string.h>
11
12
/*
 * Raw CPUID accessors:
 */

#ifdef CONFIG_X86_32
/*
 * On 32-bit kernels the CPUID instruction may be absent (very old
 * parts), so its presence is probed at runtime by an out-of-line
 * implementation.
 */
bool cpuid_feature(void);
#else
/* All 64-bit capable CPUs implement CPUID, so no runtime check. */
static inline bool cpuid_feature(void)
{
	return true;
}
#endif
24
25
/*
 * Execute the CPUID instruction directly, bypassing any paravirt
 * indirection.
 *
 * All four pointers are in/out: *eax supplies the leaf and *ecx the
 * subleaf on entry (constraints "0" and "2" tie them to the outputs);
 * on return all four hold the instruction's register output.  The
 * "memory" clobber keeps the asm ordered against surrounding memory
 * accesses.
 */
static inline void native_cpuid(u32 *eax, u32 *ebx,
				u32 *ecx, u32 *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
37
38
/*
 * NATIVE_CPUID_REG(reg) - generate native_cpuid_<reg>(), which runs
 * CPUID leaf @op with subleaf 0 (ECX cleared) and returns only the
 * named output register.
 */
#define NATIVE_CPUID_REG(reg)					\
static inline u32 native_cpuid_##reg(u32 op)			\
{								\
	u32 eax = op, ebx, ecx = 0, edx;			\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum:
 */
NATIVE_CPUID_REG(eax)
NATIVE_CPUID_REG(ebx)
NATIVE_CPUID_REG(ecx)
NATIVE_CPUID_REG(edx)
55
56
#ifdef CONFIG_PARAVIRT_XXL
/* Paravirt kernels are expected to get their __cpuid() from here. */
# include <asm/paravirt.h>
#else
/* Otherwise __cpuid() is simply the native instruction. */
# define __cpuid native_cpuid
#endif
61
62
/*
 * Generic CPUID function
 *
 * Clear ECX since some CPUs (Cyrix MII) do not set or clear ECX
 * resulting in stale register contents being returned.
 *
 * @op is placed in *eax as the leaf number; all four pointers receive
 * the corresponding register output from __cpuid().
 */
static inline void cpuid(u32 op,
			 u32 *eax, u32 *ebx,
			 u32 *ecx, u32 *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
76
77
/* Some CPUID calls want 'count' to be placed in ECX */
/*
 * Like cpuid(), but with an explicit subleaf: @op goes to *eax and
 * @count (the ECX subleaf index) to *ecx before invoking __cpuid().
 */
static inline void cpuid_count(u32 op, int count,
			       u32 *eax, u32 *ebx,
			       u32 *ecx, u32 *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
86
87
/*
88
* CPUID functions returning a single datum:
89
*/
90
91
static inline u32 cpuid_eax(u32 op)
92
{
93
u32 eax, ebx, ecx, edx;
94
95
cpuid(op, &eax, &ebx, &ecx, &edx);
96
97
return eax;
98
}
99
100
/* Run CPUID leaf @op (subleaf 0) and return only its EBX output. */
static inline u32 cpuid_ebx(u32 op)
{
	u32 a, b, c, d;

	cpuid(op, &a, &b, &c, &d);
	return b;
}
108
109
/* Run CPUID leaf @op (subleaf 0) and return only its ECX output. */
static inline u32 cpuid_ecx(u32 op)
{
	u32 a, b, c, d;

	cpuid(op, &a, &b, &c, &d);
	return c;
}
117
118
/* Run CPUID leaf @op (subleaf 0) and return only its EDX output. */
static inline u32 cpuid_edx(u32 op)
{
	u32 a, b, c, d;

	cpuid(op, &a, &b, &c, &d);
	return d;
}
126
127
/*
 * Run CPUID for @leaf/@subleaf and store all four output registers in
 * @regs[], indexed by enum cpuid_regs_idx.  The EAX/ECX slots double as
 * the leaf/subleaf inputs to __cpuid().
 */
static inline void __cpuid_read(u32 leaf, u32 subleaf, u32 *regs)
{
	regs[CPUID_EAX] = leaf;
	regs[CPUID_ECX] = subleaf;
	__cpuid(&regs[CPUID_EAX], &regs[CPUID_EBX], &regs[CPUID_ECX], &regs[CPUID_EDX]);
}
133
134
/*
 * Read CPUID @leaf/@subleaf output into @regs, which must point to a
 * 16-byte object (the four u32 output registers), enforced at compile
 * time.
 *
 * Wrapped in do { } while (0) so the expansion is a single statement:
 * the previous bare-brace form broke "if (x) cpuid_subleaf(...); else"
 * because the trailing semicolon terminated the if.
 */
#define cpuid_subleaf(leaf, subleaf, regs) do {		\
	static_assert(sizeof(*(regs)) == 16);		\
	__cpuid_read(leaf, subleaf, (u32 *)(regs));	\
} while (0)
138
139
/*
 * Read CPUID @leaf (subleaf 0) output into @regs, which must point to
 * a 16-byte object (the four u32 output registers), enforced at
 * compile time.
 *
 * do { } while (0) makes the macro a single statement so it composes
 * safely with unbraced if/else (the bare-brace form did not).
 */
#define cpuid_leaf(leaf, regs) do {			\
	static_assert(sizeof(*(regs)) == 16);		\
	__cpuid_read(leaf, 0, (u32 *)(regs));		\
} while (0)
143
144
/*
 * Run CPUID for @leaf/@subleaf and copy only the register selected by
 * @regidx into *@reg.
 */
static inline void __cpuid_read_reg(u32 leaf, u32 subleaf,
				    enum cpuid_regs_idx regidx, u32 *reg)
{
	u32 output[4];

	__cpuid_read(leaf, subleaf, output);
	*reg = output[regidx];
}
152
153
/*
 * Read one register (@regidx) of CPUID @leaf/@subleaf into *@reg.
 * @reg must point to a 4-byte object, enforced at compile time.
 *
 * do { } while (0) makes the macro a single statement so it composes
 * safely with unbraced if/else (the bare-brace form did not).
 */
#define cpuid_subleaf_reg(leaf, subleaf, regidx, reg) do {	\
	static_assert(sizeof(*(reg)) == 4);			\
	__cpuid_read_reg(leaf, subleaf, regidx, (u32 *)(reg));	\
} while (0)
157
158
/*
 * Read one register (@regidx) of CPUID @leaf (subleaf 0) into *@reg.
 * @reg must point to a 4-byte object, enforced at compile time.
 *
 * do { } while (0) makes the macro a single statement so it composes
 * safely with unbraced if/else (the bare-brace form did not).
 */
#define cpuid_leaf_reg(leaf, regidx, reg) do {			\
	static_assert(sizeof(*(reg)) == 4);			\
	__cpuid_read_reg(leaf, 0, regidx, (u32 *)(reg));	\
} while (0)
162
163
/*
164
* Hypervisor-related APIs:
165
*/
166
167
static __always_inline bool cpuid_function_is_indexed(u32 function)
168
{
169
switch (function) {
170
case 4:
171
case 7:
172
case 0xb:
173
case 0xd:
174
case 0xf:
175
case 0x10:
176
case 0x12:
177
case 0x14:
178
case 0x17:
179
case 0x18:
180
case 0x1d:
181
case 0x1e:
182
case 0x1f:
183
case 0x24:
184
case 0x8000001d:
185
return true;
186
}
187
188
return false;
189
}
190
191
/*
 * Iterate @function over every possible hypervisor CPUID base leaf:
 * 0x40000000 up to (but not including) 0x40010000, in steps of 0x100.
 */
#define for_each_possible_cpuid_base_hypervisor(function) \
	for (function = 0x40000000; function < 0x40010000; function += 0x100)
193
194
/*
 * cpuid_base_hypervisor() - Locate a hypervisor's CPUID base leaf
 * @sig:    12-byte signature to match against the EBX/ECX/EDX output
 *          (not NUL-terminated)
 * @leaves: minimum leaf count required, i.e. (EAX - base) >= @leaves;
 *          0 means any
 *
 * Scan the hypervisor CPUID range for a base leaf matching @sig.
 * Return the base leaf number, or 0 if none was found.
 */
static inline u32 cpuid_base_hypervisor(const char *sig, u32 leaves)
{
	u32 base, eax, signature[3];

	for_each_possible_cpuid_base_hypervisor(base) {
		/* signature[] receives EBX, ECX, EDX in that order */
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		/*
		 * This must not compile to "call memcmp" because it's called
		 * from PVH early boot code before instrumentation is set up
		 * and memcmp() itself may be instrumented.
		 */
		if (!__builtin_memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
213
214
/*
215
* CPUID(0x2) parsing:
216
*/
217
218
/**
219
* cpuid_leaf_0x2() - Return sanitized CPUID(0x2) register output
220
* @regs: Output parameter
221
*
222
* Query CPUID(0x2) and store its output in @regs. Force set any
223
* invalid 1-byte descriptor returned by the hardware to zero (the NULL
224
* cache/TLB descriptor) before returning it to the caller.
225
*
226
* Use for_each_cpuid_0x2_desc() to iterate over the register output in
227
* parsed form.
228
*/
229
static inline void cpuid_leaf_0x2(union leaf_0x2_regs *regs)
230
{
231
cpuid_leaf(0x2, regs);
232
233
/*
234
* All Intel CPUs must report an iteration count of 1. In case
235
* of bogus hardware, treat all returned descriptors as NULL.
236
*/
237
if (regs->desc[0] != 0x01) {
238
for (int i = 0; i < 4; i++)
239
regs->regv[i] = 0;
240
return;
241
}
242
243
/*
244
* The most significant bit (MSB) of each register must be clear.
245
* If a register is invalid, replace its descriptors with NULL.
246
*/
247
for (int i = 0; i < 4; i++) {
248
if (regs->reg[i].invalid)
249
regs->regv[i] = 0;
250
}
251
}
252
253
/**
 * for_each_cpuid_0x2_desc() - Iterator for parsed CPUID(0x2) descriptors
 * @_regs: CPUID(0x2) register output, as returned by cpuid_leaf_0x2()
 * @_ptr: u8 pointer, for macro internal use only
 * @_desc: Pointer to the parsed CPUID(0x2) descriptor at each iteration
 *
 * Loop over the 1-byte descriptors in the passed CPUID(0x2) output registers
 * @_regs. Provide the parsed information for each descriptor through @_desc.
 *
 * To handle cache-specific descriptors, switch on @_desc->c_type. For TLB
 * descriptors, switch on @_desc->t_type.
 *
 * Example usage for cache descriptors::
 *
 *	const struct leaf_0x2_table *desc;
 *	union leaf_0x2_regs regs;
 *	u8 *ptr;
 *
 *	cpuid_leaf_0x2(&regs);
 *	for_each_cpuid_0x2_desc(regs, ptr, desc) {
 *		switch (desc->c_type) {
 *		...
 *		}
 *	}
 */
/*
 * Iteration starts at desc[1] because desc[0] is CPUID(0x2)'s
 * iteration-count byte (see cpuid_leaf_0x2()), not a real descriptor.
 * The '&&' clause cannot terminate the loop early — the address of a
 * cpuid_0x2_table[] element is never NULL — it only refreshes @_desc
 * on each iteration.
 */
#define for_each_cpuid_0x2_desc(_regs, _ptr, _desc)				\
	for (_ptr = &(_regs).desc[1];						\
	     _ptr < &(_regs).desc[16] && (_desc = &cpuid_0x2_table[*_ptr]);	\
	     _ptr++)
282
283
/*
284
* CPUID(0x80000006) parsing:
285
*/
286
287
static inline bool cpuid_amd_hygon_has_l3_cache(void)
288
{
289
return cpuid_edx(0x80000006);
290
}
291
292
#endif /* _ASM_X86_CPUID_API_H */
293
294