// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (c) 2013-2014 Linaro Ltd.
4
* Copyright (c) 2013-2014 HiSilicon Limited.
5
*/
6
#include <linux/init.h>
7
#include <linux/smp.h>
8
#include <linux/delay.h>
9
#include <linux/io.h>
10
#include <linux/memblock.h>
11
#include <linux/of_address.h>
12
13
#include <asm/cputype.h>
14
#include <asm/cp15.h>
15
#include <asm/cacheflush.h>
16
#include <asm/smp.h>
17
#include <asm/smp_plat.h>
18
19
#include "core.h"
20
21
/* bits definition in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x]
 * 1 -- unreset; 0 -- reset
 *
 * Note: all function-like macros fully parenthesize their argument so
 * that expression arguments (e.g. CORE_RESET_BIT(a | b)) expand
 * correctly (CERT PRE01-C).
 */
#define CORE_RESET_BIT(x)		(1 << (x))
#define NEON_RESET_BIT(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_BIT(x)		(1 << ((x) + 9))
#define CLUSTER_L2_RESET_BIT		(1 << 8)
#define CLUSTER_DEBUG_RESET_BIT		(1 << 13)

/*
 * bits definition in SC_CPU_RESET_STATUS[x]
 * 1 -- reset status; 0 -- unreset status
 */
#define CORE_RESET_STATUS(x)		(1 << (x))
#define NEON_RESET_STATUS(x)		(1 << ((x) + 4))
#define CORE_DEBUG_RESET_STATUS(x)	(1 << ((x) + 9))
#define CLUSTER_L2_RESET_STATUS		(1 << 8)
#define CLUSTER_DEBUG_RESET_STATUS	(1 << 13)
#define CORE_WFI_STATUS(x)		(1 << ((x) + 16))
#define CORE_WFE_STATUS(x)		(1 << ((x) + 20))
#define CORE_DEBUG_ACK(x)		(1 << ((x) + 24))

/* Per-cluster reset request/de-request/status registers in sysctrl. */
#define SC_CPU_RESET_REQ(x)		(0x520 + ((x) << 3))	/* reset */
#define SC_CPU_RESET_DREQ(x)		(0x524 + ((x) << 3))	/* unreset */
#define SC_CPU_RESET_STATUS(x)		(0x1520 + ((x) << 3))

/* AXI fabric register offsets. */
#define FAB_SF_MODE			0x0c
#define FAB_SF_INVLD			0x10

/* bits definition in FB_SF_INVLD */
#define FB_SF_INVLD_START		(1 << 8)

#define HIP04_MAX_CLUSTERS		4
#define HIP04_MAX_CPUS_PER_CLUSTER	4

/* Polling interval and overall timeout used by hip04_cpu_kill(). */
#define POLL_MSEC	10
#define TIMEOUT_MSEC	1000
/* Ioremapped bases of the system controller and the AXI fabric. */
static void __iomem *sysctrl, *fabric;
/*
 * Per-CPU boot usage count, protected by boot_lock: 0 means the CPU is
 * down; values > 1 let hip04_cpu_die() detect a power-up request that
 * raced ahead of a power-down (see hip04_boot_secondary()).
 */
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
/* Serialises boot/die/kill access to hip04_cpu_table and reset regs. */
static DEFINE_SPINLOCK(boot_lock);
static u32 fabric_phys_addr;
/*
 * "boot-method" cells read from the hisilicon,hip04-bootwrapper node:
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation address
 * [3]: relocation size
 */
static u32 hip04_boot_method[4];
static bool hip04_cluster_is_down(unsigned int cluster)
72
{
73
int i;
74
75
for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++)
76
if (hip04_cpu_table[cluster][i])
77
return false;
78
return true;
79
}
80
81
/*
 * Enable (@on != 0) or disable the fabric snoop filter bit for
 * @cluster, then spin until the register reads back the written value.
 * The fabric must already be mapped; calling this earlier is a bug.
 */
static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
{
	unsigned long data;

	if (!fabric)
		BUG();
	data = readl_relaxed(fabric + FAB_SF_MODE);
	if (on)
		data |= 1 << cluster;
	else
		data &= ~(1 << cluster);
	writel_relaxed(data, fabric + FAB_SF_MODE);
	/* Wait until the mode register reflects the new value. */
	do {
		cpu_relax();
	} while (data != readl_relaxed(fabric + FAB_SF_MODE));
}
/*
 * Release secondary CPU @l_cpu from reset so it enters the kernel via
 * secondary_startup (address written by hip04_smp_init()).
 *
 * hip04_cpu_table[cluster][cpu] is incremented even when the CPU is
 * already up ("goto out"), which lets a concurrent hip04_cpu_die()
 * detect that this power-up request overtook it.
 *
 * Returns 0 on success, -ENODEV if sysctrl is not mapped, -EINVAL if
 * the CPU's MPIDR is outside the supported cluster/CPU range.
 */
static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long data;
	void __iomem *sys_dreq, *sys_status;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (!sysctrl)
		return -ENODEV;
	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	/* Already up: just take another reference. */
	if (hip04_cpu_table[cluster][cpu])
		goto out;

	sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
	sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
	if (hip04_cluster_is_down(cluster)) {
		/*
		 * First CPU of the cluster: deassert the cluster debug
		 * reset and enable snooping before bringing the core up.
		 */
		data = CLUSTER_DEBUG_RESET_BIT;
		writel_relaxed(data, sys_dreq);
		do {
			cpu_relax();
			data = readl_relaxed(sys_status);
		} while (data & CLUSTER_DEBUG_RESET_STATUS);
		hip04_set_snoop_filter(cluster, 1);
	}

	/* Deassert core, NEON and debug resets for this CPU ... */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sys_dreq);
	/* ... and wait for the status bits to change. */
	do {
		cpu_relax();
	} while (data == readl_relaxed(sys_status));

	/*
	 * We may fail to power up core again without this delay.
	 * It's not mentioned in document. It's found by test.
	 */
	udelay(20);

	arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));

out:
	hip04_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
153
/*
 * Runs on the dying CPU itself.  Drops this CPU's reference in
 * hip04_cpu_table; if a power-up request raced ahead of us (count
 * still 1 after the decrement) we simply return and keep running.
 * Otherwise flush caches out of coherency and park in WFI until
 * hip04_cpu_kill() asserts our reset lines.
 */
static void hip04_cpu_die(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	spin_lock(&boot_lock);
	hip04_cpu_table[cluster][cpu]--;
	if (hip04_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		spin_unlock(&boot_lock);
		return;
	} else if (hip04_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
		BUG();
	}

	last_man = hip04_cluster_is_down(cluster);
	spin_unlock(&boot_lock);
	if (last_man) {
		/* Since it's Cortex A15, disable L2 prefetching. */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
		/* Last CPU of the cluster: flush the whole hierarchy. */
		v7_exit_coherency_flush(all);
	} else {
		/* Others flush only to the level of unification. */
		v7_exit_coherency_flush(louis);
	}

	/* Wait here until hip04_cpu_kill() resets this core. */
	for (;;)
		wfi();
}
/*
 * Runs on a surviving CPU to finish taking @l_cpu down: poll (up to
 * TIMEOUT_MSEC, in POLL_MSEC steps) for the victim to reach WFI,
 * assert its reset lines, wait for the reset to take effect, and drop
 * the cluster's snoop filter if it was the last CPU in the cluster.
 *
 * Returns 1 on success, 0 on timeout or if the CPU was booted again
 * in the meantime.
 */
static int hip04_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	unsigned int data, tries, count;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

	count = TIMEOUT_MSEC / POLL_MSEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		/* A concurrent boot_secondary() revived the CPU: abort. */
		if (hip04_cpu_table[cluster][cpu])
			goto err;
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_WFI_STATUS(cpu))
			break;
		/* Drop the lock while sleeping so boot/die can proceed. */
		spin_unlock_irq(&boot_lock);
		/* Wait for clean L2 when the whole cluster is down. */
		msleep(POLL_MSEC);
		spin_lock_irq(&boot_lock);
	}
	if (tries >= count)
		goto err;
	/* Victim is in WFI: assert core, NEON and debug resets. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
	/* Poll until the reset status confirms the core is held in reset. */
	for (tries = 0; tries < count; tries++) {
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_RESET_STATUS(cpu))
			break;
	}
	if (tries >= count)
		goto err;
	if (hip04_cluster_is_down(cluster))
		hip04_set_snoop_filter(cluster, 0);
	spin_unlock_irq(&boot_lock);
	return 1;
err:
	spin_unlock_irq(&boot_lock);
	return 0;
}
#endif
238
239
/* SMP operations registered with smp_set_ops() by hip04_smp_init(). */
static const struct smp_operations hip04_smp_ops __initconst = {
	.smp_boot_secondary	= hip04_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= hip04_cpu_die,
	.cpu_kill		= hip04_cpu_kill,
#endif
};
/*
 * Record the boot CPU in hip04_cpu_table and enable snooping for its
 * cluster.  Returns false if the boot CPU's MPIDR lies outside the
 * supported cluster/CPU range.
 */
static bool __init hip04_cpu_table_init(void)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	bool in_range = cluster < HIP04_MAX_CLUSTERS &&
			cpu < HIP04_MAX_CPUS_PER_CLUSTER;

	if (!in_range) {
		pr_err("%s: boot CPU is out of bound!\n", __func__);
		return false;
	}

	hip04_set_snoop_filter(cluster, 1);
	hip04_cpu_table[cluster][cpu] = 1;
	return true;
}
/*
 * Early-initcall entry point: locate the bootwrapper, sysctrl and
 * fabric DT nodes, map their register bases, reserve the bootwrapper
 * memory, write the secondary-entry descriptor into the relocation
 * area and register hip04_smp_ops.
 *
 * Returns 0 on success or a negative errno; resources are unwound via
 * the goto chain in reverse order of acquisition on failure.
 */
static int __init hip04_smp_init(void)
{
	struct device_node *np, *np_sctl, *np_fab;
	struct resource fab_res;
	void __iomem *relocation;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
	if (!np)
		goto err;
	ret = of_property_read_u32_array(np, "boot-method",
					 &hip04_boot_method[0], 4);
	if (ret)
		goto err;

	ret = -ENODEV;
	np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	if (!np_sctl)
		goto err;
	np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
	if (!np_fab)
		goto err;

	/* Keep the bootwrapper region away from the kernel's allocator. */
	ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
	if (ret)
		goto err;

	relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
	if (!relocation) {
		pr_err("failed to map relocation space\n");
		ret = -ENOMEM;
		goto err_reloc;
	}
	sysctrl = of_iomap(np_sctl, 0);
	if (!sysctrl) {
		pr_err("failed to get sysctrl base\n");
		ret = -ENOMEM;
		goto err_sysctrl;
	}
	ret = of_address_to_resource(np_fab, 0, &fab_res);
	if (ret) {
		pr_err("failed to get fabric base phys\n");
		goto err_fabric;
	}
	fabric_phys_addr = fab_res.start;
	/* Make the address visible to CPUs coming out of reset (no MMU). */
	sync_cache_w(&fabric_phys_addr);
	fabric = of_iomap(np_fab, 0);
	if (!fabric) {
		pr_err("failed to get fabric base\n");
		ret = -ENOMEM;
		goto err_fabric;
	}

	if (!hip04_cpu_table_init()) {
		ret = -EINVAL;
		goto err_table;
	}

	/*
	 * Fill the instruction address that is used after secondary core
	 * out of reset.
	 */
	writel_relaxed(hip04_boot_method[0], relocation);
	writel_relaxed(0xa5a5a5a5, relocation + 4);	/* magic number */
	writel_relaxed(__pa_symbol(secondary_startup), relocation + 8);
	writel_relaxed(0, relocation + 12);
	iounmap(relocation);

	smp_set_ops(&hip04_smp_ops);
	/* ret is 0 here (last set by of_address_to_resource()). */
	return ret;
err_table:
	iounmap(fabric);
err_fabric:
	iounmap(sysctrl);
err_sysctrl:
	iounmap(relocation);
err_reloc:
	memblock_phys_free(hip04_boot_method[0], hip04_boot_method[1]);
err:
	return ret;
}
early_initcall(hip04_smp_init);