GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/mach-common/smp.c
/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                     Philippe Gerum <[email protected]>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/irq_handler.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

#ifdef CONFIG_ICACHE_FLUSH_L1
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

#define BFIN_IPI_RESCHEDULE	0
#define BFIN_IPI_CALL_FUNC	1
#define BFIN_IPI_CPU_STOP	2

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;


struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t *waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	unsigned long type;
	struct smp_call_struct call_struct;
};

/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5

/* Simple FIFO buffer, overflow leads to panic */
struct ipi_message_queue {
	spinlock_t lock;
	unsigned long count;
	unsigned long head; /* head of the queue */
	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

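/*
 * Handler body for BFIN_IPI_CPU_STOP: mark this CPU offline and spin
 * forever with interrupts disabled.
 */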
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		SSYNC();
}

static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	/* Make sure all write buffers in the data side of the core
	 * are flushed before trying to invalidate the icache. This
	 * needs to be after the data flush and before the icache
	 * flush so that the SSYNC does the right thing in preventing
	 * the instruction prefetcher from hitting things in cached
	 * memory at the wrong time -- it runs much further ahead than
	 * the pipeline.
	 */
	SSYNC();

	/* ipi_flush_icache is invoked by the generic flush_icache_range,
	 * so call the blackfin arch icache flush directly here.
	 */
	blackfin_icache_flush_range(fdata->start, fdata->end);
}

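/*
 * Run a cross-call requested via BFIN_IPI_CALL_FUNC and, if the sender
 * is waiting, remove this CPU from the shared wait mask when done.
 */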
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;
	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
	}
}

/* Use IRQ_SUPPLE_0 to request a reschedule.
 * When returning from interrupt to user space,
 * there is a chance to reschedule. */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
	return IRQ_HANDLED;
}

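/*
 * IRQ_SUPPLE_1 handler: drain this CPU's IPI message queue and
 * dispatch each message to the matching handler.
 */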
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	msg_queue = &__get_cpu_var(ipi_msg_queue);

	spin_lock_irqsave(&msg_queue->lock, flags);

	while (msg_queue->count) {
		msg = &msg_queue->ipi_message[msg_queue->head];
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			scheduler_ipi();
			break;
		case BFIN_IPI_CALL_FUNC:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_call_function(cpu, msg);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_cpu_stop(cpu);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			break;
		}
		msg_queue->head++;
		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
		msg_queue->count--;
	}
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	return IRQ_HANDLED;
}

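/* Initialize every possible CPU's IPI message queue (lock, count, head). */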
static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
		msg_queue->head = 0;
	}
}

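/*
 * Queue an IPI message for every CPU in 'callmap' and kick each one via
 * IRQ_SUPPLE_1.  If 'wait' is set, spin until all targets have cleared
 * themselves from the shared wait mask.
 */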
static inline void smp_send_message(cpumask_t callmap, unsigned long type,
					void (*func) (void *info), void *info, int wait)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;
	unsigned long flags, next_msg;
	cpumask_t waitmask; /* waitmask is shared by all cpus */

	cpumask_copy(&waitmask, &callmap);
	for_each_cpu(cpu, &callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
			next_msg = (msg_queue->head + msg_queue->count)
					% BFIN_IPI_MSGQ_LEN;
			msg = &msg_queue->ipi_message[next_msg];
			msg->type = type;
			if (type == BFIN_IPI_CALL_FUNC) {
				msg->call_struct.func = func;
				msg->call_struct.info = info;
				msg->call_struct.wait = wait;
				msg->call_struct.waitmask = &waitmask;
			}
			msg_queue->count++;
		} else
			panic("IPI message queue overflow\n");
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
	}

	if (wait) {
		while (!cpumask_empty(&waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&waitmask),
				(unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
	}
}

int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t callmap;

	preempt_disable();
	cpumask_copy(&callmap, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &callmap);
	if (!cpumask_empty(&callmap))
		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	preempt_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);

int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;

	if (cpu_is_offline(cpu))
		return 0;
	cpumask_clear(&callmap);
	cpumask_set_cpu(cpu, &callmap);

	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

void smp_send_reschedule(int cpu)
{
	/* simply trigger an ipi */
	if (cpu_is_offline(cpu))
		return;
	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);

	return;
}

void smp_send_stop(void)
{
	cpumask_t callmap;

	preempt_disable();
	cpumask_copy(&callmap, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &callmap);
	if (!cpumask_empty(&callmap))
		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);

	preempt_enable();

	return;
}

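/*
 * Bring up a secondary core: hand it a freshly forked idle task's stack
 * and let the platform code start it.
 */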
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}

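/* Per-core interrupt setup for a freshly started secondary CPU. */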
static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have already been
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}

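/*
 * C entry point for a secondary core: report any double-fault state,
 * attach to init_mm, set up interrupts, caches and the local core timer,
 * then enter the idle loop.
 */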
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	bfin_setup_caches(cpu);

	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

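/*
 * Ask all other online CPUs to flush the given instruction-cache range.
 * The bounds are carried in the shared smp_flush_data descriptor.
 */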
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif

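/* CPU hotplug support: take a core offline and wait for it to die. */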
#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif