Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/cpu/topology_common.c
26493 views
1
// SPDX-License-Identifier: GPL-2.0
2
#include <linux/cpu.h>
3
4
#include <xen/xen.h>
5
6
#include <asm/intel-family.h>
7
#include <asm/apic.h>
8
#include <asm/processor.h>
9
#include <asm/smp.h>
10
11
#include "cpu.h"
12
13
/* System-wide topology shift/size information, established once at boot */
struct x86_topology_system x86_topo_system __ro_after_init;
EXPORT_SYMBOL_GPL(x86_topo_system);

/* AMD nodes per package; kept separately as it cannot be derived from the APIC ID */
unsigned int __amd_nodes_per_pkg __ro_after_init;
EXPORT_SYMBOL_GPL(__amd_nodes_per_pkg);
18
19
void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
20
unsigned int shift, unsigned int ncpus)
21
{
22
topology_update_dom(tscan, dom, shift, ncpus);
23
24
/* Propagate to the upper levels */
25
for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
26
tscan->dom_shifts[dom] = tscan->dom_shifts[dom - 1];
27
tscan->dom_ncpus[dom] = tscan->dom_ncpus[dom - 1];
28
}
29
}
30
31
enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
32
{
33
if (c->x86_vendor == X86_VENDOR_INTEL) {
34
switch (c->topo.intel_type) {
35
case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
36
case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
37
}
38
}
39
if (c->x86_vendor == X86_VENDOR_AMD) {
40
switch (c->topo.amd_type) {
41
case 0: return TOPO_CPU_TYPE_PERFORMANCE;
42
case 1: return TOPO_CPU_TYPE_EFFICIENCY;
43
}
44
}
45
46
return TOPO_CPU_TYPE_UNKNOWN;
47
}
48
49
const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
50
{
51
switch (get_topology_cpu_type(c)) {
52
case TOPO_CPU_TYPE_PERFORMANCE:
53
return "performance";
54
case TOPO_CPU_TYPE_EFFICIENCY:
55
return "efficiency";
56
default:
57
return "unknown";
58
}
59
}
60
61
/*
 * Determine the number of cores per package from the legacy CPUID leaf
 * 0x4 (EAX bits 31:26 hold cores - 1). Returns 1 when the leaf is not
 * available or reports no cache, i.e. a single core is assumed.
 */
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
	/* Layout of CPUID(0x4, 0) EAX; ncores occupies the top 6 bits */
	struct {
		u32	cache_type	:  5,
			unused		: 21,
			ncores		:  6;
	} eax;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_subleaf_reg(4, 0, CPUID_EAX, &eax);
	/* No cache type enumerated means the leaf carries no core count */
	if (!eax.cache_type)
		return 1;

	return eax.ncores + 1;
}
78
79
/*
 * Legacy topology parser for CPUs without the extended topology leaves
 * (0xb/0x1f). Derives the SMT and CORE domain shifts from the core
 * count in leaf 0x4 and the logical processor count from leaf 0x1 EBX.
 */
static void parse_legacy(struct topo_scan *tscan)
{
	unsigned int cores, core_shift, smt_shift = 0;
	struct cpuinfo_x86 *c = tscan->c;

	cores = parse_num_cores_legacy(c);
	core_shift = get_count_order(cores);

	if (cpu_has(c, X86_FEATURE_HT)) {
		/*
		 * If leaf 0x1 EBX claims fewer logical CPUs than cores,
		 * warn about the firmware bug and assume no SMT
		 * (smt_shift stays 0).
		 */
		if (!WARN_ON_ONCE(tscan->ebx1_nproc_shift < core_shift))
			smt_shift = tscan->ebx1_nproc_shift - core_shift;
		/*
		 * The parser expects leaf 0xb/0x1f format, which means
		 * the number of logical processors at core level is
		 * counting threads.
		 */
		core_shift += smt_shift;
		cores <<= smt_shift;
	}

	topology_set_dom(tscan, TOPO_SMT_DOMAIN, smt_shift, 1U << smt_shift);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, core_shift, cores);
}
102
103
static bool fake_topology(struct topo_scan *tscan)
104
{
105
/*
106
* Preset the CORE level shift for CPUID less systems and XEN_PV,
107
* which has useless CPUID information.
108
*/
109
topology_set_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
110
topology_set_dom(tscan, TOPO_CORE_DOMAIN, 0, 1);
111
112
return tscan->c->cpuid_level < 1;
113
}
114
115
/*
 * Parse the topology of one CPU.
 *
 * Presets c->topo with safe defaults, reads the initial APIC ID from
 * CPUID leaf 0x1 and dispatches to the vendor specific topology parsers.
 *
 * @early: True for the invocation from early_identify_cpu() where the
 *	   APIC is not yet usable, so the initial APIC ID is used as is.
 */
static void parse_topology(struct topo_scan *tscan, bool early)
{
	const struct cpuinfo_topology topo_defaults = {
		.cu_id = 0xff,
		.llc_id = BAD_APICID,
		.l2c_id = BAD_APICID,
		.cpu_type = TOPO_CPU_TYPE_UNKNOWN,
	};
	struct cpuinfo_x86 *c = tscan->c;
	/* Layout of CPUID(0x1) EBX: logical processor count and initial APIC ID */
	struct {
		u32	unused0		: 16,
			nproc		:  8,
			apicid		:  8;
	} ebx;

	c->topo = topo_defaults;

	/* No usable CPUID: the defaults installed by fake_topology() stand */
	if (fake_topology(tscan))
		return;

	/* Preset Initial APIC ID from CPUID leaf 1 */
	cpuid_leaf_reg(1, CPUID_EBX, &ebx);
	c->topo.initial_apicid = ebx.apicid;

	/*
	 * The initial invocation from early_identify_cpu() happens before
	 * the APIC is mapped or X2APIC enabled. For establishing the
	 * topology, that's not required. Use the initial APIC ID.
	 */
	if (early)
		c->topo.apicid = c->topo.initial_apicid;
	else
		c->topo.apicid = read_apic_id();

	/* The above is sufficient for UP */
	if (!IS_ENABLED(CONFIG_SMP))
		return;

	/* Logical processor count from leaf 0x1, consumed by parse_legacy() */
	tscan->ebx1_nproc_shift = get_count_order(ebx.nproc);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (IS_ENABLED(CONFIG_CPU_SUP_AMD))
			cpu_parse_topology_amd(tscan);
		break;
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		parse_legacy(tscan);
		break;
	case X86_VENDOR_INTEL:
		/* Fall back to the legacy parser when the extended leaves are unusable */
		if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
			parse_legacy(tscan);
		/* Hybrid core type information from CPUID leaf 0x1a, if present */
		if (c->cpuid_level >= 0x1a)
			c->topo.cpu_type = cpuid_eax(0x1a);
		break;
	case X86_VENDOR_HYGON:
		/* Hygon shares the AMD topology enumeration scheme */
		if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
			cpu_parse_topology_amd(tscan);
		break;
	}
}
176
177
/*
 * Derive the topology IDs of @tscan->c from its APIC ID via the domain
 * shifts in x86_topo_system.
 *
 * @early: True during boot time initialization, where the logical
 *	   (enumeration order) IDs cannot be assigned yet.
 */
static void topo_set_ids(struct topo_scan *tscan, bool early)
{
	struct cpuinfo_x86 *c = tscan->c;
	u32 apicid = c->topo.apicid;

	c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);
	c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);

	if (!early) {
		c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
		c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
		c->topo.logical_core_id = topology_get_logical_id(apicid, TOPO_CORE_DOMAIN);
	}

	/* Package relative core ID */
	c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
		x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN];

	c->topo.amd_node_id = tscan->amd_node_id;

	/* AMD specific fixup of the parsed topology data */
	if (c->x86_vendor == X86_VENDOR_AMD)
		cpu_topology_fixup_amd(tscan);
}
200
201
/*
 * Parse the topology of @c and sanity check it against the system wide
 * topology in x86_topo_system established at boot.
 *
 * Inconsistencies (APIC ID mismatches, differing domain shifts) are
 * reported as firmware bugs but not corrected.
 */
void cpu_parse_topology(struct cpuinfo_x86 *c)
{
	unsigned int dom, cpu = smp_processor_id();
	struct topo_scan tscan = { .c = c, };

	parse_topology(&tscan, false);

	if (IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
		/* The CPUID provided initial APIC ID must match the APIC */
		if (c->topo.initial_apicid != c->topo.apicid) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. CPUID: 0x%04x APIC: 0x%04x\n",
			       cpu, c->topo.initial_apicid, c->topo.apicid);
		}

		/* The APIC ID must match what firmware enumerated for this CPU */
		if (c->topo.apicid != cpuid_to_apicid[cpu]) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. Firmware: 0x%04x APIC: 0x%04x\n",
			       cpu, cpuid_to_apicid[cpu], c->topo.apicid);
		}
	}

	/* Every domain shift must agree with the boot established topology */
	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++) {
		if (tscan.dom_shifts[dom] == x86_topo_system.dom_shifts[dom])
			continue;
		pr_err(FW_BUG "CPU%d: Topology domain %u shift %u != %u\n", cpu, dom,
		       tscan.dom_shifts[dom], x86_topo_system.dom_shifts[dom]);
	}

	topo_set_ids(&tscan, false);
}
229
230
/*
 * Establish the system wide topology from the boot CPU.
 *
 * Parses the boot CPU's topology (early, before the APIC is usable),
 * copies the domain shifts into x86_topo_system, computes the per-domain
 * unit sizes and assigns the boot CPU's topology IDs.
 */
void __init cpu_init_topology(struct cpuinfo_x86 *c)
{
	struct topo_scan tscan = { .c = c, };
	unsigned int dom, sft;

	parse_topology(&tscan, true);

	/* Copy the shift values and calculate the unit sizes. */
	memcpy(x86_topo_system.dom_shifts, tscan.dom_shifts, sizeof(x86_topo_system.dom_shifts));

	dom = TOPO_SMT_DOMAIN;
	x86_topo_system.dom_size[dom] = 1U << x86_topo_system.dom_shifts[dom];

	/* The size of a domain is relative to the next lower domain's shift */
	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		sft = x86_topo_system.dom_shifts[dom] - x86_topo_system.dom_shifts[dom - 1];
		x86_topo_system.dom_size[dom] = 1U << sft;
	}

	topo_set_ids(&tscan, true);

	/*
	 * AMD systems have Nodes per package which cannot be mapped to
	 * APIC ID.
	 */
	__amd_nodes_per_pkg = tscan.amd_nodes_per_pkg;
}
256
257