GitHub Repository: torvalds/linux
Path: blob/master/arch/csky/kernel/smp.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif
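
/*
 * IPI message types. Each type is a bit index into the per-CPU pending
 * bitmask below; IPI_EMPTY occupies bit 0 and is never raised by a
 * sender, so it only shows up as the "Empty interrupts" row in
 * /proc/interrupts, while the other types map onto generic kernel
 * services dispatched from handle_ipi().
 */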
enum ipi_message_type {
        IPI_EMPTY,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_IRQ_WORK,
        IPI_MAX
};
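
/*
 * Per-CPU IPI state: @bits is the pending-message bitmask that remote
 * senders write with set_bit(), @stats counts handled messages per
 * type. ____cacheline_aligned keeps the two on separate cache lines,
 * so remote senders and the local handler do not false-share.
 */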
struct ipi_data_struct {
        unsigned long bits ____cacheline_aligned;
        unsigned long stats[IPI_MAX] ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);
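
/*
 * Per-CPU IPI handler. xchg() atomically claims and clears the pending
 * bitmask, and the loop re-reads it until it stays empty, so messages
 * that arrive while a batch is being processed are not lost.
 */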
static irqreturn_t handle_ipi(int irq, void *dev)
{
        unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;

        while (true) {
                unsigned long ops;

                ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
                if (ops == 0)
                        return IRQ_HANDLED;

                if (ops & (1 << IPI_RESCHEDULE)) {
                        stats[IPI_RESCHEDULE]++;
                        scheduler_ipi();
                }

                if (ops & (1 << IPI_CALL_FUNC)) {
                        stats[IPI_CALL_FUNC]++;
                        generic_smp_call_function_interrupt();
                }

                if (ops & (1 << IPI_IRQ_WORK)) {
                        stats[IPI_IRQ_WORK]++;
                        irq_work_run();
                }

                BUG_ON((ops >> IPI_MAX) != 0);
        }

        return IRQ_HANDLED;
}
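
/*
 * The platform's irqchip driver supplies the actual doorbell: it
 * registers a mask-based send routine plus the per-CPU IPI irq number
 * through set_send_ipi(). Only the first registration takes effect.
 */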
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
        if (send_arch_ipi)
                return;

        send_arch_ipi = func;
        ipi_irq = irq;
}
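
/*
 * Post @operation to every CPU in @to_whom, then ring the doorbell.
 * The smp_mb() orders the set_bit() stores before the hardware IPI, so
 * the receiving CPU's handle_ipi() is guaranteed to observe the new
 * bits.
 */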
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int i;

        for_each_cpu(i, to_whom)
                set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

        smp_mb();
        send_arch_ipi(to_whom);
}
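
/*
 * /proc/interrupts support: print one row per IPI type, with a count
 * column for each online CPU, in the same layout as ordinary interrupt
 * lines.
 */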
static const char * const ipi_names[] = {
        [IPI_EMPTY]             = "Empty interrupts",
        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
        [IPI_CALL_FUNC]         = "Function call interrupts",
        [IPI_IRQ_WORK]          = "Irq work interrupts",
};

int arch_show_interrupts(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < IPI_MAX; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10lu ",
                                   per_cpu_ptr(&ipi_data, cpu)->stats[i]);
                seq_printf(p, " %s\n", ipi_names[i]);
        }

        return 0;
}
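
/*
 * Cross-call senders used by the generic smp_call_function() code:
 * kicking a CPU mask and kicking a single CPU both reduce to an
 * IPI_CALL_FUNC message here.
 */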
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
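
/*
 * smp_send_stop() parks every CPU in a tight loop (the caller
 * included, since on_each_cpu() also runs the function locally); used
 * on shutdown paths where scheduling must cease.
 */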
static void ipi_stop(void *unused)
{
        while (1);
}

void smp_send_stop(void)
{
        on_each_cpu(ipi_stop, NULL, 1);
}
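
/*
 * A reschedule request is a plain IPI_RESCHEDULE message; the real
 * work happens in scheduler_ipi() on the target. Raising irq_work is a
 * self-IPI, so queued work runs as soon as the local CPU takes the
 * interrupt.
 */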
void arch_smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;
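
/*
 * Claim the per-CPU IPI interrupt and enable it on the boot CPU;
 * secondaries enable it for themselves in csky_start_secondary(). If
 * no irqchip registered an IPI (ipi_irq == 0), this quietly does
 * nothing.
 */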
void __init setup_smp_ipi(void)
{
        int rc;

        if (ipi_irq == 0)
                return;

        rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
                                &ipi_dummy_dev);
        if (rc)
                panic("%s IRQ request failed\n", __func__);

        enable_percpu_irq(ipi_irq, 0);
}
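
/*
 * Enumerate CPUs from the devicetree: every available /cpus node with
 * a hardware id below NR_CPUS is marked possible and present.
 */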
void __init setup_smp(void)
{
        struct device_node *node = NULL;
        unsigned int cpu;

        for_each_of_cpu_node(node) {
                if (!of_device_is_available(node))
                        continue;

                cpu = of_get_cpu_hwid(node, 0);
                if (cpu >= NR_CPUS)
                        continue;

                set_cpu_possible(cpu, true);
                set_cpu_present(cpu, true);
        }
}
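
/*
 * Hand-off area between __cpu_up() on the boot CPU and the secondary
 * entry path. Plain volatile globals are used because the secondary
 * starts with its MMU and caches in the reset state; __cpu_up()
 * flushes the values to memory before releasing it.
 */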
extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
volatile unsigned int secondary_msa1;
volatile unsigned int secondary_pgd;
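
/*
 * Boot a secondary: stage the idle task's stack and the boot CPU's
 * control-register values for the newcomer, flush them to memory, then
 * either release the CPU from reset via the SMP reset ctrl register
 * or, if it is already running (a previously unplugged CPU parked in
 * arch_cpu_idle_dead()), wake it with an IPI.
 */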
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        unsigned long mask = 1 << cpu;

        secondary_stack =
                (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
        secondary_hint = mfcr("cr31");
        secondary_hint2 = mfcr("cr<21, 1>");
        secondary_ccr = mfcr("cr18");
        secondary_msa1 = read_mmu_msa1();
        secondary_pgd = mfcr("cr<29, 15>");

        /*
         * The other CPUs are still held in reset, so flush these
         * values out of the data cache; the secondary reads them from
         * memory in csky_start_secondary() before its caches are up.
         */
        mtcr("cr17", 0x22);

        if (mask & mfcr("cr<29, 0>")) {
                send_arch_ipi(cpumask_of(cpu));
        } else {
                /* Enable cpu in SMP reset ctrl reg */
                mask |= mfcr("cr<29, 0>");
                mtcr("cr<29, 0>", mask);
        }

        /* Wait for the cpu online */
        while (!cpu_online(cpu));

        secondary_stack = 0;

        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}
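
/*
 * C entry point for a secondary CPU, reached from the low-level
 * startup code once secondary_stack is installed. Restore the staged
 * control registers, attach to init_mm, and hand control to the
 * generic hotplug/idle machinery.
 */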
void csky_start_secondary(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        mtcr("cr31", secondary_hint);
        mtcr("cr<21, 1>", secondary_hint2);
        mtcr("cr18", secondary_ccr);

        mtcr("vbr", vec_base);

        flush_tlb_all();
        write_mmu_pagemask(0);

#ifdef CONFIG_CPU_HAS_FPU
        init_fpu();
#endif

        enable_percpu_irq(ipi_irq, 0);

        mmget(mm);
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);

        pr_info("CPU%u Online: %s...\n", cpu, __func__);

        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
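
/*
 * CPU hotplug: __cpu_disable() runs on the CPU going down and migrates
 * its interrupts away; arch_cpu_idle_dead() then parks it until
 * __cpu_up() publishes a fresh secondary_stack, at which point it
 * re-enters csky_start_secondary() as if freshly booted.
 */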
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        set_cpu_online(cpu, false);

        irq_migrate_all_off_this_cpu();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}

void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
        pr_notice("CPU%u: shutdown\n", cpu);
}

void __noreturn arch_cpu_idle_dead(void)
{
        idle_task_exit();

        cpuhp_ap_report_dead();

        while (!secondary_stack)
                arch_cpu_idle();

        raw_local_irq_disable();

        asm volatile(
                "mov    sp, %0\n"
                "mov    r8, %0\n"
                "jmpi   csky_start_secondary"
                :
                : "r" (secondary_stack));

        BUG();
}
#endif