GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/xen/smp.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

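/*
 * Unbind this cpu's IPI/VIRQ handlers and free the names allocated for
 * them in xen_smp_intr_init().  Safe to call after a partial init:
 * unbound entries keep .irq == -1 and a NULL .name.
 */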
void xen_smp_intr_free(unsigned int cpu)
{
	kfree(per_cpu(xen_resched_irq, cpu).name);
	per_cpu(xen_resched_irq, cpu).name = NULL;
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfunc_irq, cpu).name);
	per_cpu(xen_callfunc_irq, cpu).name = NULL;
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_debug_irq, cpu).name);
	per_cpu(xen_debug_irq, cpu).name = NULL;
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
	}
}

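/*
 * Allocate names and bind the per-cpu IPIs (and, when the FIFO-based
 * event channel ABI is not in use, the VIRQ_DEBUG handler) for @cpu.
 * Returns 0 on success; on failure everything bound so far is torn
 * down via xen_smp_intr_free() and a negative errno is returned.
 */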
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	if (!resched_name)
		goto fail_mem;
	per_cpu(xen_resched_irq, cpu).name = resched_name;
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	if (!callfunc_name)
		goto fail_mem;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;

	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		if (!debug_name)
			goto fail_mem;

		per_cpu(xen_debug_irq, cpu).name = debug_name;
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	if (!callfunc_name)
		goto fail_mem;

	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

	return 0;

 fail_mem:
	rc = -ENOMEM;
 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

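/*
 * Finish SMP bring-up.  HVM domains defer to the native code; PV
 * domains need no extra work here.
 */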
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
}

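/* Kick @cpu with a reschedule IPI delivered over its event channel. */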
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

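/*
 * Deliver @vector to every online cpu in @mask, one event-channel
 * notification per target.
 */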
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

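/* Send the call-function IPI to all cpus in @mask. */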
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

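/* Single-target variant of the call-function IPI. */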
void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

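/*
 * Translate a native x86 IPI vector into its Xen equivalent.  Returns
 * -1 (and logs an error) for vectors that have no Xen counterpart.
 */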
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

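/* Send a native vector to @mask, silently dropping unmapped vectors. */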
void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

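/* Broadcast a native vector to every online cpu, including this one. */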
void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

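/* Self-IPI: deliver the mapped vector to the current cpu only. */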
void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

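/*
 * Send the mapped vector to every online cpu in @mask except the
 * sender.  Nothing to do on a single-cpu system or for an unmapped
 * vector.
 */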
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

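/* Broadcast to all online cpus except the sender. */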
void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

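/* Handler for the call-function IPI: run the queued cross-cpu calls. */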
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}

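/* Handler for the single-target call-function IPI. */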
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}