GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/mach-bcm/platsmp-brcmstb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom STB CPU SMP and hotplug support for ARM
 *
 * Copyright (C) 2013-2014 Broadcom Corporation
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>
#include <linux/regmap.h>
#include <linux/smp.h>
#include <linux/mfd/syscon.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>

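/*
 * Editor's note: the ZONE_* values below are bits of the per-CPU power-zone
 * control/status register reached through pwr_ctrl_get_base(). The last two
 * entries are not register bits; they are word indices into the "syscon-cpu"
 * DT property, read in setup_hifcpubiuctrl_regs().
 */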
enum {
	ZONE_MAN_CLKEN_MASK = BIT(0),
	ZONE_MAN_RESET_CNTL_MASK = BIT(1),
	ZONE_MAN_MEM_PWR_MASK = BIT(4),
	ZONE_RESERVED_1_MASK = BIT(5),
	ZONE_MAN_ISO_CNTL_MASK = BIT(6),
	ZONE_MANUAL_CONTROL_MASK = BIT(7),
	ZONE_PWR_DN_REQ_MASK = BIT(9),
	ZONE_PWR_UP_REQ_MASK = BIT(10),
	ZONE_BLK_RST_ASSERT_MASK = BIT(12),
	ZONE_PWR_OFF_STATE_MASK = BIT(25),
	ZONE_PWR_ON_STATE_MASK = BIT(26),
	ZONE_DPG_PWR_STATE_MASK = BIT(28),
	ZONE_MEM_PWR_STATE_MASK = BIT(29),
	ZONE_RESET_STATE_MASK = BIT(31),
	CPU0_PWR_ZONE_CTRL_REG = 1,
	CPU_RESET_CONFIG_REG = 2,
};

static void __iomem *cpubiuctrl_block;
static void __iomem *hif_cont_block;
static u32 cpu0_pwr_zone_ctrl_reg;
static u32 cpu_rst_cfg_reg;
static u32 hif_cont_reg;

#ifdef CONFIG_HOTPLUG_CPU
/*
 * We must quiesce a dying CPU before it can be killed by the boot CPU. Because
 * one or more caches may be disabled, we must flush to ensure coherency. We
 * cannot use traditional completion structures or spinlocks as they rely on
 * coherency.
 */
static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state);

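/*
 * Editor's note: the flag is exchanged through main memory rather than the
 * caches: sync_cache_w() cleans the writer's cache line after the store and
 * sync_cache_r() invalidates the reader's copy before the load, so the
 * handshake still works once the dying CPU has turned its cache off.
 */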
static int per_cpu_sw_state_rd(u32 cpu)
{
	sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	return per_cpu(per_cpu_sw_state, cpu);
}

static void per_cpu_sw_state_wr(u32 cpu, int val)
{
	dmb();
	per_cpu(per_cpu_sw_state, cpu) = val;
	sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
}
#else
static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
#endif

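/*
 * Each CPU has a 32-bit power-zone control register in the cpubiuctrl block,
 * starting at cpu0_pwr_zone_ctrl_reg and spaced one word apart, indexed by
 * the physical CPU number from cpu_logical_map().
 */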
static void __iomem *pwr_ctrl_get_base(u32 cpu)
{
	void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg;
	base += (cpu_logical_map(cpu) * 4);
	return base;
}

static u32 pwr_ctrl_rd(u32 cpu)
{
	void __iomem *base = pwr_ctrl_get_base(cpu);
	return readl_relaxed(base);
}

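/*
 * Read-modify-write helpers: 'mask' selects which bits of the current
 * register value are kept before 'val' is ORed in (pwr_ctrl_set) or cleared
 * (pwr_ctrl_clr). Callers pass -1 to preserve everything; the power-on path
 * passes 0xffffff00 once to clear the low control byte in the same write.
 */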
static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask)
{
	void __iomem *base = pwr_ctrl_get_base(cpu);
	writel((readl(base) & mask) | val, base);
}

static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask)
{
	void __iomem *base = pwr_ctrl_get_base(cpu);
	writel((readl(base) & mask) & ~val, base);
}

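/*
 * Poll the zone status until the bits selected by 'mask' match 'set' (any
 * bit set vs. all clear), giving up after POLL_TMOUT_MS. The extra check
 * after the loop avoids reporting a timeout when the poller was simply
 * scheduled out past the deadline between two reads.
 */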
#define POLL_TMOUT_MS 500
static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask)
{
	const unsigned long timeo = jiffies + msecs_to_jiffies(POLL_TMOUT_MS);
	u32 tmp;

	do {
		tmp = pwr_ctrl_rd(cpu) & mask;
		if (!set == !tmp)
			return 0;
	} while (time_before(jiffies, timeo));

	tmp = pwr_ctrl_rd(cpu) & mask;
	if (!set == !tmp)
		return 0;

	return -ETIMEDOUT;
}

static void cpu_rst_cfg_set(u32 cpu, int set)
{
	u32 val;
	val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg);
	if (set)
		val |= BIT(cpu_logical_map(cpu));
	else
		val &= ~BIT(cpu_logical_map(cpu));
	writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg);
}

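/*
 * Each CPU has an 8-byte slot in the HIF continuation block: the first word
 * is cleared and the second receives the physical address the core should
 * jump to once it is released from reset.
 */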
static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
{
	const int reg_ofs = cpu_logical_map(cpu) * 8;
	writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs);
	writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs);
}

static void brcmstb_cpu_boot(u32 cpu)
{
	/* Mark this CPU as "up" */
	per_cpu_sw_state_wr(cpu, 1);

	/*
	 * Set the reset vector to point to the secondary_startup
	 * routine
	 */
	cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup));

	/* Unhalt the cpu */
	cpu_rst_cfg_set(cpu, 0);
}

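/*
 * Editor's note: power-up sequence for a secondary CPU's zone under manual
 * (BPCM) control: raise isolation, take manual control, power the zone
 * memories, wait for the memory power state, enable the clock, wait for the
 * domain power state, then drop isolation and set the manual reset control
 * bit.
 */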
static void brcmstb_cpu_power_on(u32 cpu)
{
	/*
	 * The secondary core's power was cut, so we must go through
	 * power-on initialization.
	 */
	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00);
	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1);

	pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK))
		panic("ZONE_MEM_PWR_STATE_MASK set timeout");

	pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK))
		panic("ZONE_DPG_PWR_STATE_MASK set timeout");

	pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
}

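/* The zone is treated as powered off while its reset-state bit is set. */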
static int brcmstb_cpu_get_power_state(u32 cpu)
{
	int tmp = pwr_ctrl_rd(cpu);
	return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1;
}

#ifdef CONFIG_HOTPLUG_CPU

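/*
 * Hotplug handshake: brcmstb_cpu_die() runs on the CPU going down (flush
 * caches, clear its per_cpu_sw_state flag, then wfi), while
 * brcmstb_cpu_kill() runs on another CPU and spins until that flag clears
 * before cutting power and asserting reset.
 */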
static void brcmstb_cpu_die(u32 cpu)
{
	v7_exit_coherency_flush(all);

	per_cpu_sw_state_wr(cpu, 0);

	/* Sit and wait to die */
	wfi();

	/* We should never get here... */
	while (1)
		;
}

static int brcmstb_cpu_kill(u32 cpu)
{
	/*
	 * Ordinarily, the hardware forbids power-down of CPU0 (which is good
	 * because it is the boot CPU), but this is not true when using BPCM
	 * manual mode. Consequently, we must avoid turning off CPU0 here to
	 * ensure that TI2C master reset will work.
	 */
	if (cpu == 0) {
		pr_warn("SMP: refusing to power off CPU0\n");
		return 1;
	}

	while (per_cpu_sw_state_rd(cpu))
		;

	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK))
		panic("ZONE_MEM_PWR_STATE_MASK clear timeout");

	pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK))
		panic("ZONE_DPG_PWR_STATE_MASK clear timeout");

	/* Flush pipeline before resetting CPU */
	mb();

	/* Assert reset on the CPU */
	cpu_rst_cfg_set(cpu, 1);

	return 1;
}

#endif /* CONFIG_HOTPLUG_CPU */

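/*
 * The "brcm,brcmstb-smpboot" node is expected to carry a "syscon-cpu"
 * property: a phandle to the CPU BIU control syscon followed by cells read
 * by index here (CPU0_PWR_ZONE_CTRL_REG and CPU_RESET_CONFIG_REG give the
 * offsets of the power-zone and reset-config registers), plus a
 * "syscon-cont" phandle for the HIF continuation block handled below.
 */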
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
	int rc = 0;
	char *name;
	struct device_node *syscon_np = NULL;

	name = "syscon-cpu";

	syscon_np = of_parse_phandle(np, name, 0);
	if (!syscon_np) {
		pr_err("can't find phandle %s\n", name);
		rc = -EINVAL;
		goto cleanup;
	}

	cpubiuctrl_block = of_iomap(syscon_np, 0);
	if (!cpubiuctrl_block) {
		pr_err("iomap failed for cpubiuctrl_block\n");
		rc = -EINVAL;
		goto cleanup;
	}

	rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG,
					&cpu0_pwr_zone_ctrl_reg);
	if (rc) {
		pr_err("failed to read 1st entry from %s property (%d)\n", name,
			rc);
		rc = -EINVAL;
		goto cleanup;
	}

	rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG,
					&cpu_rst_cfg_reg);
	if (rc) {
		pr_err("failed to read 2nd entry from %s property (%d)\n", name,
			rc);
		rc = -EINVAL;
		goto cleanup;
	}

cleanup:
	of_node_put(syscon_np);
	return rc;
}

static int __init setup_hifcont_regs(struct device_node *np)
{
	int rc = 0;
	char *name;
	struct device_node *syscon_np = NULL;

	name = "syscon-cont";

	syscon_np = of_parse_phandle(np, name, 0);
	if (!syscon_np) {
		pr_err("can't find phandle %s\n", name);
		rc = -EINVAL;
		goto cleanup;
	}

	hif_cont_block = of_iomap(syscon_np, 0);
	if (!hif_cont_block) {
		pr_err("iomap failed for hif_cont_block\n");
		rc = -EINVAL;
		goto cleanup;
	}

	/* Offset is at top of hif_cont_block */
	hif_cont_reg = 0;

cleanup:
	of_node_put(syscon_np);
	return rc;
}

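/*
 * smp_prepare_cpus hook: locate the smpboot DT node and map both register
 * blocks. Errors are only logged; if either block is left unmapped,
 * brcmstb_boot_secondary() later fails with -ENODEV.
 */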
static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus)
{
	int rc;
	struct device_node *np;
	char *name;

	name = "brcm,brcmstb-smpboot";
	np = of_find_compatible_node(NULL, NULL, name);
	if (!np) {
		pr_err("can't find compatible node %s\n", name);
		return;
	}

	rc = setup_hifcpubiuctrl_regs(np);
	if (rc)
		goto out_put_node;

	rc = setup_hifcont_regs(np);
	if (rc)
		goto out_put_node;

out_put_node:
	of_node_put(np);
}

static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* Missing the brcm,brcmstb-smpboot DT node? */
	if (!cpubiuctrl_block || !hif_cont_block)
		return -ENODEV;

	/* Bring up power to the core if necessary */
	if (brcmstb_cpu_get_power_state(cpu) == 0)
		brcmstb_cpu_power_on(cpu);

	brcmstb_cpu_boot(cpu);

	return 0;
}

static const struct smp_operations brcmstb_smp_ops __initconst = {
	.smp_prepare_cpus = brcmstb_cpu_ctrl_setup,
	.smp_boot_secondary = brcmstb_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill = brcmstb_cpu_kill,
	.cpu_die = brcmstb_cpu_die,
#endif
};

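/*
 * Registers these ops as the "brcm,brahma-b15" CPU enable-method, so they
 * are selected when a cpu node in the device tree names that method.
 */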
CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);