GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/apic/ipi.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/string_choices.h>

#include <asm/io_apic.h>

#include "local.h"

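/*
 * Runtime switch between the two IPI transmission strategies: when the
 * key is true, the broadcast shorthand encodings (all / all-but-self)
 * are used; when false, IPIs are sent to an explicit destination mask.
 * The key is flipped by apic_smt_update() below.
 */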
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

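/*
 * "no_ipi_broadcast=1" on the kernel command line sets
 * apic_ipi_shorthand_off and thereby keeps shorthand broadcasts
 * permanently disabled in favor of the mask based fallback.
 */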
static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		str_disabled_enabled(apic_ipi_shorthand_off));
	return 0;
}
late_initcall(print_ipi_mode);

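/*
 * Re-evaluate whether IPI shorthand broadcasts are safe to use. Called
 * when the set of online CPUs changes (CPU hotplug / SMT control).
 */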
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		__apic_send_IPI_allbutself(vector);
	else
		__apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

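/*
 * If every online CPU other than (possibly) the sender is in @mask, the
 * cheaper shorthand broadcasts can be used: "all" when the sender is
 * itself in the mask, "all-but-self" otherwise. Any other mask falls
 * back to an explicitly targeted IPI.
 */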
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			__apic_send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			__apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

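/*
 * NMI delivery to an offline CPU requires a callback provided by the
 * APIC driver and a target whose local APIC has been initialized at
 * least once; a never-booted APIC may be in a random state.
 */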
void apic_send_nmi_to_offline_cpu(unsigned int cpu)
{
	if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
		return;
	if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
		return;
	apic->send_IPI(cpu, NMI_VECTOR);
}
#endif /* CONFIG_SMP */

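/*
 * In xAPIC mode the 64-bit ICR is split across two 32-bit registers;
 * ICR2 carries the destination field in its top byte (bits 56-63 of
 * the full ICR).
 */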
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}

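/*
 * Bounded wait for the ICR busy bit to clear: up to 1000 polls with a
 * 100us delay each, i.e. roughly 100ms worst case before giving up and
 * returning APIC_ICR_BUSY.
 */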
u32 apic_mem_wait_icr_idle_timeout(void)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(apic_read(APIC_ICR) & APIC_ICR_BUSY))
			return 0;
		inc_irq_stat(icr_read_retry_count);
		udelay(100);
	}
	return APIC_ICR_BUSY;
}

void apic_mem_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

/*
 * This is safe against interruption because it only writes the lower 32
 * bits of the APIC_ICR register. The destination field is ignored for
 * short hand IPIs.
 *
 *  wait_icr_idle()
 *  write(ICR2, dest)
 *		NMI
 *		wait_icr_idle()
 *		write(ICR)
 *  wait_icr_idle()
 *  write(ICR)
 *
 * This function does not need to disable interrupts as there is no ICR2
 * interaction. The memory write is direct except when the machine is
 * affected by the 11AP Pentium erratum, which turns the plain write into
 * an XCHG operation.
 */
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Wait for the previous ICR command to complete. Use the
	 * timeout-bounded apic_mem_wait_icr_idle_timeout() for the NMI
	 * vector as there have been issues where otherwise the system
	 * hangs when the panic CPU tries to stop the others before
	 * launching the kdump kernel.
	 */
	if (unlikely(vector == NMI_VECTOR))
		apic_mem_wait_icr_idle_timeout();
	else
		apic_mem_wait_icr_idle();

	/* Destination field (ICR2) and the destination mode are ignored */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, 0));
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int dest_mask, int vector,
				   unsigned int dest_mode)
{
	/* See comment in __default_send_IPI_shortcut() */
	if (unlikely(vector == NMI_VECTOR))
		apic_mem_wait_icr_idle_timeout();
	else
		apic_mem_wait_icr_idle();

	/* Set the IPI destination field in the ICR */
	native_apic_mem_write(APIC_ICR2, __prepare_ICR2(dest_mask));
	/* Send it with the proper destination mode */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, dest_mode));
}

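/*
 * The callers below disable interrupts around the ICR2/ICR write pair:
 * an IPI sent from an interrupt handler in between would overwrite the
 * already-programmed ICR2 destination.
 */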
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned long cpu;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	__apic_send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

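/*
 * 32-bit only: logical destination mode. With the flat logical-ID
 * layout each CPU's logical APIC ID has exactly one bit set, so
 * "1U << cpu" below encodes the destination directly; the 8-bit xAPIC
 * destination field caps this at eight CPUs.
 */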
#ifdef CONFIG_X86_32
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);
	for_each_cpu(cpu, mask)
		__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

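/*
 * Unlike the sequence variants above, this sends a single IPI: in
 * logical mode the destination bits of all target CPUs can be OR-ed
 * into one destination field.
 */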
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}
#endif