GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/apic/x2apic_cluster.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"

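/*
 * x2apic cluster mode logical destination IDs have the cluster number
 * in the upper 16 bits and a one-bit-per-CPU mask in the lower 16 bits,
 * so a cluster covers at most 16 CPUs. apic_cluster() extracts the
 * cluster number from a physical x2apic ID.
 */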
#define apic_cluster(apicid) ((apicid) >> 4)

/*
 * __x2apic_send_IPI_mask() possibly needs to read
 * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
 * Using per cpu variable would cost one cache line per cpu.
 */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cpumask *, cluster_masks);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

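/* Send one IPI to a single CPU, using its precomputed logical ID. */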
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = x86_cpu_to_logical_apicid[cpu];

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

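/*
 * Multi-CPU variant: the targets are collapsed per cluster and the
 * logical IDs of all targets within one cluster are OR'ed into a single
 * destination, so only one ICR write per cluster is needed.
 */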
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cpumask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, cmsk)
			dest |= x86_cpu_to_logical_apicid[clustercpu];

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
		/* Remove cluster CPUs from tmpmask */
		cpumask_andnot(tmpmsk, tmpmsk, cmsk);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

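/* Destination ID handed to the generic APIC code for routing to @cpu. */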
static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return x86_cpu_to_logical_apicid[cpu];
}

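/*
 * In x2apic mode the LDR is read-only and set by hardware, so the only
 * work left here is to record this CPU in its cluster mask.
 */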
static void init_x2apic_ldr(void)
{
	struct cpumask *cmsk = this_cpu_read(cluster_masks);

	BUG_ON(!cmsk);

	cpumask_set_cpu(smp_processor_id(), cmsk);
}

/*
 * As an optimisation during boot, set the cluster_mask for all present
 * CPUs at once, to prevent each of them having to iterate over the others
 * to find the existing cluster_mask.
 */
static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster)
{
	int cpu_i;

	for_each_present_cpu(cpu_i) {
		struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i);
		u32 apicid = apic->cpu_present_to_apicid(cpu_i);

		if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster)
			continue;

		if (WARN_ON_ONCE(*cpu_cmsk == cmsk))
			continue;

		BUG_ON(*cpu_cmsk);
		*cpu_cmsk = cmsk;
	}
}

static int alloc_clustermask(unsigned int cpu, u32 cluster, int node)
{
	struct cpumask *cmsk = NULL;
	unsigned int cpu_i;

	/*
	 * At boot time, the CPU present mask is stable. The cluster mask is
	 * allocated for the first CPU in the cluster and propagated to all
	 * present siblings in the cluster. If the cluster mask is already set
	 * on entry to this function for a given CPU, there is nothing to do.
	 */
	if (per_cpu(cluster_masks, cpu))
		return 0;

	if (system_state < SYSTEM_RUNNING)
		goto alloc;

	/*
	 * On post boot hotplug for a CPU which was not present at boot time,
	 * iterate over all possible CPUs (even those which are not present
	 * any more) to find any existing cluster mask.
	 */
	for_each_possible_cpu(cpu_i) {
		u32 apicid = apic->cpu_present_to_apicid(cpu_i);

		if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) {
			cmsk = per_cpu(cluster_masks, cpu_i);
			/*
			 * If the cluster is already initialized, just store
			 * the mask and return. There's no need to propagate.
			 */
			if (cmsk) {
				per_cpu(cluster_masks, cpu) = cmsk;
				return 0;
			}
		}
	}
	/*
	 * No CPU in the cluster has ever been initialized, so fall through to
	 * the boot time code which will also populate the cluster mask for any
	 * other CPU in the cluster which is (now) present.
	 */
alloc:
	cmsk = kzalloc_node(sizeof(*cmsk), GFP_KERNEL, node);
	if (!cmsk)
		return -ENOMEM;
	per_cpu(cluster_masks, cpu) = cmsk;
	prefill_clustermask(cmsk, cpu, cluster);

	return 0;
}

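/*
 * Compute and store the logical ID for @cpu: the cluster number goes
 * into the upper 16 bits, and the CPU's position within the cluster
 * (the low four bits of the physical ID) selects one bit in the lower
 * 16 bits. Also allocate the cluster and IPI scratch masks.
 */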
static int x2apic_prepare_cpu(unsigned int cpu)
{
	u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
	u32 cluster = apic_cluster(phys_apicid);
	u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
	int node = cpu_to_node(cpu);

	x86_cpu_to_logical_apicid[cpu] = logical_apicid;

	if (alloc_clustermask(cpu, cluster, node) < 0)
		return -ENOMEM;

	if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
		return -ENOMEM;

	return 0;
}

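/*
 * Hotplug teardown: drop the CPU from its cluster mask and free its IPI
 * scratch mask. The cluster mask itself stays allocated as it may be
 * shared with other CPUs in the cluster.
 */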
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, cmsk);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}

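/*
 * Probe: allocate the logical ID array and register the hotplug
 * callbacks; returns 1 to claim the driver when x2apic is enabled.
 * The array is padded to at least one full cache line of u32 entries,
 * presumably so these sequentially read slots do not share a cache
 * line with unrelated data.
 */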
static int x2apic_cluster_probe(void)
{
	u32 slots;

	if (!x2apic_mode)
		return 0;

	slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
	x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
	if (!x86_cpu_to_logical_apicid)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		kfree(x86_cpu_to_logical_apicid);
		x86_cpu_to_logical_apicid = NULL;
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}

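/* Callback table for the cluster x2apic driver, registered below. */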
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,

	.dest_mode_logical		= true,

	.disable_esr			= 0,

	.init_apic_ldr			= init_x2apic_ldr,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,
	.nmi_to_offline_cpu		= true,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
};

apic_driver(apic_x2apic_cluster);