GitHub Repository: torvalds/linux
Path: blob/master/arch/sh/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <linux/profile.h>

#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

static asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void arch_smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif

void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif