GitHub Repository: awilliam/linux-vfio
Path: arch/mn10300/kernel/smp.c
/* SMP support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"

#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#include <asm/cacheflush.h>

static unsigned long sleep_mode[NR_CPUS];

static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Debug Message function
 */

#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0

/*
 * Structure and data for smp_nmi_call_function().
 */
struct nmi_call_data_struct {
	smp_call_func_t func;
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
	char size_alignment[0]
		__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
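
/*
 * Handshake used by smp_nmi_call_function(): the initiating CPU fills in
 * func/info, sets the bit of every other online CPU in ->started (and in
 * ->finished if it intends to wait), publishes the structure through
 * nmi_call_data and raises CALL_FUNCTION_NMI_IPI.  In
 * smp_nmi_call_function_interrupt(), each target CPU clears its bit in
 * ->started before running func and, if asked to, clears its bit in
 * ->finished afterwards; the initiator spins until the relevant mask is
 * empty.  The zero-length size_alignment member presumably pads the
 * structure to a whole number of cache lines so that the explicit cache
 * maintenance in hotplug_cpu_nmi_call_function() covers it exactly.
 */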

static DEFINE_SPINLOCK(smp_nmi_call_lock);
static struct nmi_call_data_struct *nmi_call_data;

/*
 * Data structures and variables
 */
static cpumask_t cpu_callin_map;	/* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map;	/* Bitmask of callout CPUs */
cpumask_t cpu_boot_map;			/* Bitmask of boot APs */
unsigned long start_stack[NR_CPUS - 1];

/*
 * Per CPU parameters
 */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;			/* The count of boot CPUs */
static cpumask_t smp_commenced_mask;
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/*
 * Function Prototypes
 */
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);

/*
 * IPI Initialization interrupt definitions
 */
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_chip_disable(struct irq_data *d);
static void mn10300_ipi_chip_enable(struct irq_data *d);
static void mn10300_ipi_ack(struct irq_data *d);
static void mn10300_ipi_nop(struct irq_data *d);

static struct irq_chip mn10300_ipi_type = {
	.name = "cpu_ipi",
	.irq_disable = mn10300_ipi_chip_disable,
	.irq_enable = mn10300_ipi_chip_enable,
	.irq_ack = mn10300_ipi_ack,
	.irq_eoi = mn10300_ipi_nop
};

static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);

static struct irqaction reschedule_ipi = {
	.handler = smp_reschedule_interrupt,
	.name = "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
	.handler = smp_call_function_interrupt,
	.name = "smp call function IPI"
};

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
	.handler = smp_ipi_timer_interrupt,
	.flags = IRQF_DISABLED,
	.name = "smp local timer IPI"
};
#endif

/**
 * init_ipi - Initialise the IPI mechanism
 */
static void init_ipi(void)
{
	unsigned long flags;
	u16 tmp16;

	/* set up the reschedule IPI */
	irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
	mn10300_ipi_enable(RESCHEDULE_IPI);

	/* set up the call function IPI */
	irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
	mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
	/* set up the cache flush IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
			mn10300_low_ipi_handler);
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
	arch_local_irq_restore(flags);
#endif

	/* set up the NMI call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);

	/* set up the SMP boot IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
			mn10300_low_ipi_handler);
	arch_local_irq_restore(flags);
}
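
/*
 * The GxICR helpers below all follow the same pattern: modify the ICR
 * register for the IPI with interrupts disabled and then read it back.
 * As with the CROSS_GxICR() read-back in send_IPI_mask() (which the code
 * annotates with "flush write buffer"), the dummy read appears to be there
 * to push the write out to the interrupt controller before interrupts are
 * re-enabled.
 */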

/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

static void mn10300_ipi_chip_enable(struct irq_data *d)
{
	mn10300_ipi_enable(d->irq);
}

/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = tmp & GxICR_LEVEL;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

static void mn10300_ipi_chip_disable(struct irq_data *d)
{
	mn10300_ipi_disable(d->irq);
}


/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @irq: The IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.
 */
static void mn10300_ipi_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_nop - Dummy IPI action
 * @irq: The IPI to be acted upon.
 */
static void mn10300_ipi_nop(struct irq_data *d)
{
}

/**
 * send_IPI_mask - Send IPIs to all CPUs in list
 * @cpumask: The list of CPUs to target.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 * finish before returning. The caller is responsible for synchronisation if
 * that is needed.
 */
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
	int i;
	u16 tmp;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpumask_test_cpu(i, cpumask)) {
			/* send IPI */
			tmp = CROSS_GxICR(irq, i);
			CROSS_GxICR(irq, i) =
				tmp | GxICR_REQUEST | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, i); /* flush write buffer */
		}
	}
}

/**
 * send_IPI_self - Send an IPI to this CPU.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to the current CPU.
 */
void send_IPI_self(int irq)
{
	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}

/**
 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all CPUs in the system barring the current one,
 * not waiting for them to finish before returning. The caller is responsible
 * for synchronisation if that is needed.
 */
void send_IPI_allbutself(int irq)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	send_IPI_mask(&cpumask, irq);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	BUG();
	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}

/**
 * smp_send_reschedule - Send reschedule IPI to a CPU
 * @cpu: The CPU to target.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}

/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until function is run on all CPUs.
 *
 * Send a non-maskable request to all CPUs in the system, requesting them to
 * run the specified function with the given context data, and, potentially, to
 * wait for completion of that function on all CPUs.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
 * timeout.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
	struct nmi_call_data_struct data;
	unsigned long flags;
	unsigned int cnt;
	int cpus, ret = 0;

	cpus = num_online_cpus() - 1;
	if (cpus < 1)
		return 0;

	data.func = func;
	data.info = info;
	cpumask_copy(&data.started, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &data.started);
	data.wait = wait;
	if (wait)
		data.finished = data.started;

	spin_lock_irqsave(&smp_nmi_call_lock, flags);
	nmi_call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

	/* Wait for response */
	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
		for (cnt = 0;
		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
		     !cpumask_empty(&data.started);
		     cnt++)
			mdelay(1);

		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
			for (cnt = 0;
			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
			     !cpumask_empty(&data.finished);
			     cnt++)
				mdelay(1);
		}

		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
			ret = -ETIMEDOUT;

	} else {
		/* If timeout value is zero, wait until cpumask has been
		 * cleared */
		while (!cpumask_empty(&data.started))
			barrier();
		if (wait)
			while (!cpumask_empty(&data.finished))
				barrier();
	}

	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
	return ret;
}
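
/*
 * Note that CALL_FUNCTION_NMI_IPI_TIMEOUT is defined as 0 above, so the
 * branch taken in smp_nmi_call_function() is actually the else clause:
 * the initiator busy-waits indefinitely for the other CPUs to respond,
 * and -ETIMEDOUT can only be returned if the timeout is changed to a
 * non-zero number of milliseconds.
 */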

/**
 * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
 *
 * Send a non-maskable request to all other CPUs in the system, instructing
 * them to jump into the debugger. The caller is responsible for checking that
 * the other CPUs responded to the instruction.
 *
 * The caller should make sure that this CPU's debugger IPI is disabled.
 */
void smp_jump_to_debugger(void)
{
	if (num_online_cpus() > 1)
		/* Send a message to all other CPUs */
		send_IPI_allbutself(DEBUGGER_NMI_IPI);
}

/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 */
void stop_this_cpu(void *unused)
{
	static volatile int stopflag;
	unsigned long flags;

#ifdef CONFIG_GDBSTUB
	/* In case of single stepping smp_send_stop by other CPU,
	 * clear procindebug to avoid deadlock.
	 */
	atomic_set(&procindebug[smp_processor_id()], 0);
#endif	/* CONFIG_GDBSTUB */

	flags = arch_local_cli_save();
	set_cpu_online(smp_processor_id(), false);

	while (!stopflag)
		cpu_relax();

	set_cpu_online(smp_processor_id(), true);
	arch_local_irq_restore(flags);
}
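
/*
 * Nothing in this file ever sets stopflag, so a CPU that enters
 * stop_this_cpu() marks itself offline and then spins in the
 * while (!stopflag) loop with interrupts disabled until the system is
 * reset; the code after the loop is effectively unreachable as things
 * stand.
 */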

/**
 * smp_send_stop - Send a stop request to all CPUs.
 */
void smp_send_stop(void)
{
	smp_nmi_call_function(stop_this_cpu, NULL, 0);
}

/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	/* generic_smp_call_function_interrupt(); */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 */
void smp_nmi_call_function_interrupt(void)
{
	smp_call_func_t func = nmi_call_data->func;
	void *info = nmi_call_data->info;
	int wait = nmi_call_data->wait;

	/* Notify the initiating CPU that I've grabbed the data and am about to
	 * execute the function
	 */
	smp_mb();
	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
	(*func)(info);

	if (wait) {
		smp_mb();
		cpumask_clear_cpu(smp_processor_id(),
				  &nmi_call_data->finished);
	}
}

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
	return local_timer_interrupt();
}
#endif

void __init smp_init_cpus(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		set_cpu_possible(i, true);
		set_cpu_present(i, true);
	}
}

/**
 * smp_cpu_init - Initialise AP in start_secondary.
 *
 * For this Application Processor, set up init_mm, initialise FPU and set
 * interrupt level 0-6 setting.
 */
static void __init smp_cpu_init(void)
{
	unsigned long flags;
	int cpu_id = smp_processor_id();
	u16 tmp16;

	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for (;;)
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	enter_lazy_tlb(&init_mm, current);

	/* Force FPU initialization */
	clear_using_fpu(current);

	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(LOCAL_TIMER_IPI);

	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

	mn10300_ipi_shutdown(SMP_BOOT_IRQ);

	/* Set up the non-maskable call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);
}

/**
 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
 *
 * Set interrupt level 0-6 setting and init ICR of the kernel debugger.
 */
void smp_prepare_cpu_init(void)
{
	int loop;

	/* Set the interrupt vector registers */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* Disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_KERNEL_DEBUGGER
	/* initialise the kernel debugger interrupt */
	do {
		unsigned long flags;
		u16 tmp16;

		flags = arch_local_cli_save();
		GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
		tmp16 = GxICR(DEBUGGER_NMI_IPI);
		arch_local_irq_restore(flags);
	} while (0);
#endif
}
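
/*
 * Secondary CPU bring-up, as implemented below, is a three-way handshake:
 *
 *  1. smp_prepare_cpus()/do_boot_cpu() on the boot CPU sends SMP_BOOT_IRQ
 *     to the AP and, once the IPI has been accepted, sets the AP's bit in
 *     cpu_callout_map.
 *  2. The AP enters start_secondary(), runs smp_cpu_init() and
 *     smp_callin(); smp_callin() waits (up to 2s) for the callout bit,
 *     then sets the AP's bit in cpu_callin_map, which do_boot_cpu() is
 *     polling for.
 *  3. The AP then spins until __cpu_up() sets its bit in
 *     smp_commenced_mask, at which point it calls smp_online() to mark
 *     itself online and finally enters cpu_idle().
 */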

/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 */
int __init start_secondary(void *unused)
{
	smp_cpu_init();
	smp_callin();
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	local_flush_tlb();
	preempt_disable();
	smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	init_clockevents();
#endif
	cpu_idle();
	return 0;
}

/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu, and boot up APs.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* Setup boot CPU information */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* If SMP should be disabled, then finish */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* Boot secondary CPUs (for which phy_id > 0) */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* Don't boot primary CPU */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}

/**
 * smp_store_cpu_info - Save a CPU's information
 * @cpu: The CPU to save for.
 *
 * Save boot_cpu_data and jiffy for the specified CPU.
 */
static void __init smp_store_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	*ci = boot_cpu_data;
	ci->loops_per_jiffy = loops_per_jiffy;
	ci->type = CPUREV;
}

/**
 * smp_tune_scheduling - Set time slice value
 *
 * Nothing to do here.
 */
static void __init smp_tune_scheduling(void)
{
}

/**
 * do_boot_cpu: Boot up one CPU
 * @phy_id: Physical ID of CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;

	cpucount++;

	/* Create idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send boot IPI to AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait for AP's IPI receive in 100[ms] */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow AP to start initializing */
		cpumask_set_cpu(cpu_id, &cpu_callout_map);

		/* Wait for setting cpu_callin_map */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpumask_test_cpu(cpu_id,
							 &cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	if (send_status == GxICR_REQUEST || callin_status == 0) {
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}

/**
 * smp_show_cpu_info - Show SMP CPU information
 * @cpu: The CPU of interest.
 */
static void __init smp_show_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	printk(KERN_INFO
	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
	       cpu,
	       MN10300_IOCLK / 1000000,
	       (MN10300_IOCLK / 10000) % 100,
	       ci->loops_per_jiffy / (500000 / HZ),
	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}

/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait for AP startup 2s total */
	while (time_before(jiffies, timeout)) {
		if (cpumask_test_cpu(cpu, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();	/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpumask_set_cpu(cpu, &cpu_callin_map);
}

/**
 * smp_online - Set cpu_online_mask
 */
static void __init smp_online(void)
{
	int cpu;

	cpu = smp_processor_id();

	local_irq_enable();

	set_cpu_online(cpu, true);
	smp_wmb();
}

/**
 * smp_cpus_done -
 * @max_cpus: Maximum CPU count.
 *
 * Do nothing.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 *
 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
 * processor (CPU 0).
 */
void __devinit smp_prepare_boot_cpu(void)
{
	cpumask_set_cpu(0, &cpu_callout_map);
	cpumask_set_cpu(0, &cpu_callin_map);
	current_thread_info()->cpu = 0;
}

/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set SP register and jump to thread's PC address.
 */
void initialize_secondary(void)
{
	asm volatile (
		"mov	%0,sp	\n"
		"jmp	(%1)	\n"
		:
		: "a"(current->thread.sp), "a"(current->thread.pc));
}

/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	if (num_online_cpus() == 1)
		disable_hlt();
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpumask_set_cpu(cpu, &smp_commenced_mask);

	/* Wait 5s total for a response */
	for (timeout = 0 ; timeout < 5000 ; timeout++) {
		if (cpu_online(cpu))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_online(cpu));
	return 0;
}
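
/*
 * With CONFIG_HOTPLUG_CPU, a CPU taken down via __cpu_die() is not reset:
 * run_sleep_cpu() (defined at the end of this file) parks it in SLEEP
 * mode, and a subsequent __cpu_up() brings it back through
 * run_wakeup_cpu() rather than the boot-IPI path in do_boot_cpu().  The
 * disable_hlt()/enable_hlt() calls in __cpu_up() and __cpu_die() bracket
 * the period during which more than one CPU is online.
 */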

/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier - The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*
 * CPU hotplug routines
 */
#ifdef CONFIG_HOTPLUG_CPU

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu, ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	migrate_irqs();
	cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm));
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}

#ifdef CONFIG_MN10300_CACHE_ENABLED
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	asm volatile(
		"	movhu	(%1),%0	\n"
		"	and	%2,%0	\n"
		"	movhu	%0,(%1)	\n"
		"1:	movhu	(%1),%0	\n"
		"	btst	%3,%0	\n"
		"	bne	1b	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}

static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	asm volatile(
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}

static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	asm volatile (
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}

#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache() do {} while (0)
#define hotplug_cpu_enable_cache() do {} while (0)
#define hotplug_cpu_invalidate_cache() do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */

/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);

	do {
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpumask_empty(&nmi_call_func_mask_data.started));

	if (wait) {
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}
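
/*
 * Sleep/wakeup flow for CPU hotplug (all of the cross-calls below go
 * through hotplug_cpu_nmi_call_function()):
 *
 *  - run_sleep_cpu() first runs prepare_sleep_cpu() on the target, which
 *    sets sleep_mode[], flushes and disables the local caches, and then
 *    runs sleep_cpu(), which loops in __sleep_cpu() until sleep_mode[] is
 *    cleared again.
 *  - run_wakeup_cpu() runs wakeup_cpu() on the target, which re-enables
 *    the caches and clears sleep_mode[]; the woken CPU then falls out of
 *    the loop in sleep_cpu() and calls restart_wakeup_cpu() to mark
 *    itself online again.
 *
 * The explicit dcache flush/invalidate calls around
 * nmi_call_func_mask_data in hotplug_cpu_nmi_call_function() appear to be
 * needed because the target CPU may be running with its data cache
 * disabled at this point.
 */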

static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &cpu_callin_map);
	local_flush_tlb();
	set_cpu_online(cpu, true);
	smp_wmb();
}

static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}

/* when this function called, IE=0, NMID=0. */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();
	/*
	 * CALL_FUNCTION_NMI_IPI for wakeup_cpu() shall not be requested,
	 * before this cpu goes in SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);
	restart_wakeup_cpu();
}

static void run_sleep_cpu(unsigned int cpu)
{
	unsigned long flags;
	cpumask_t cpumask;

	cpumask_copy(&cpumask, &cpumask_of(cpu));
	flags = arch_local_cli_save();
	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
	udelay(1);		/* delay for the cpu to sleep. */
	arch_local_irq_restore(flags);
}

static void wakeup_cpu(void)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();
	sleep_mode[smp_processor_id()] = 0;
}

static void run_wakeup_cpu(unsigned int cpu)
{
	unsigned long flags;

	flags = arch_local_cli_save();
#if NR_CPUS == 2
	mn10300_local_dcache_flush_inv();
#else
	/*
	 * Before waking up the cpu,
	 * all online cpus should stop and flush D-Cache for global data.
	 */
#error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y.
#endif
	hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
	arch_local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG_CPU */