GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/tile/kernel/smpboot.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

/* State of each CPU. */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* The messaging code jumps to this pointer during boot-up */
unsigned long start_cpu_function_addr;

/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	init_messaging();
}

static void start_secondary(void);

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message. We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
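	/* Sanity check: cpu_count includes the boot cpu and must not exceed max_cpus (treated as 1 if it is zero). */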
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb(); /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}

static __initdata struct cpumask init_affinity;

static __init int reset_init_affinity(void)
{
	long rc = sched_setaffinity(current->pid, &init_affinity);
	if (rc != 0)
		pr_warning("couldn't reset init affinity (%ld)\n",
			   rc);
	return 0;
}
late_initcall(reset_init_affinity);

static struct cpumask cpu_started __cpuinitdata;

/*
 * Activate a secondary processor. Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void __cpuinit start_secondary(void)
{
	int cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	preempt_disable();

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__get_cpu_var(current_asid) = min_asid;

	/* Set up this thread as another owner of the init_mm */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warning("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}

/*
 * Bring a secondary processor online.
 */
void __cpuinit online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), 1);
	ipi_call_unlock();
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Set up tile-specific state for this cpu. */
	setup_cpu(0);

	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	preempt_enable();

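	/* Enter the idle loop; control does not return from here. */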
	cpu_idle();
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	/* Wait 5s total for all CPUs to come online */
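	/* 50000 iterations of udelay(100) below make up the 5-second budget. */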
	static int timeout;
	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			pr_info("skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}

static void panic_start_cpu(void)
{
	panic("Received a MSG_START_CPU IPI after boot finished.");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu, next, rc;

	/* Reset the response to a (now illegal) MSG_START_CPU IPI. */
	start_cpu_function_addr = (unsigned long) &panic_start_cpu;

	cpumask_copy(&init_affinity, cpu_online_mask);

	/*
	 * Pin ourselves to a single cpu in the initial affinity set
	 * so that kernel mappings for the rootfs are not in the dataplane,
	 * if set, and to avoid unnecessary migrating during bringup.
	 * Use the last cpu just in case the whole chip has been
	 * isolated from the scheduler, to keep init away from likely
	 * more useful user code. This also ensures that work scheduled
	 * via schedule_delayed_work() in the init routines will land
	 * on this cpu.
	 */
	for (cpu = cpumask_first(&init_affinity);
	     (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
	     cpu = next)
		;
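	/* The empty-bodied loop above leaves "cpu" as the last cpu in init_affinity. */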
	rc = sched_setaffinity(current->pid, cpumask_of(cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}