GitHub Repository: torvalds/linux
Path: blob/master/drivers/bus/bt1-apb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <[email protected]>
 *
 * Baikal-T1 APB-bus driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/nmi.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/time64.h>
#include <linux/sysfs.h>

#define APB_EHB_ISR			0x00
#define APB_EHB_ISR_PENDING		BIT(0)
#define APB_EHB_ISR_MASK		BIT(1)
#define APB_EHB_ADDR			0x04
#define APB_EHB_TIMEOUT			0x08

#define APB_EHB_TIMEOUT_MIN		0x000003FFU
#define APB_EHB_TIMEOUT_MAX		0xFFFFFFFFU

/*
 * struct bt1_apb - Baikal-T1 APB EHB private data
 * @dev: Pointer to the device structure.
 * @regs: APB EHB registers map.
 * @res: No-device error injection memory region.
 * @irq: Errors IRQ number.
 * @rate: APB-bus reference clock rate.
 * @pclk: APB-reference clock.
 * @prst: APB domain reset line.
 * @count: Number of errors detected.
 */
struct bt1_apb {
	struct device *dev;

	struct regmap *regs;
	void __iomem *res;
	int irq;

	unsigned long rate;
	struct clk *pclk;

	struct reset_control *prst;

	atomic_t count;
};

static const struct regmap_config bt1_apb_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = APB_EHB_TIMEOUT,
	.fast_io = true
};

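/*
 * The EHB timeout register holds a number of APB reference-clock cycles.
 * The helpers below convert that cycle count to microseconds and back
 * using the cached pclk rate: timeout_us = n * USEC_PER_SEC / rate.
 */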
static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
{
	u64 timeout = (u64)n * USEC_PER_SEC;

	do_div(timeout, apb->rate);

	return timeout;
}

static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
						    unsigned long timeout)
{
	u64 n = (u64)timeout * apb->rate;

	do_div(n, USEC_PER_SEC);

	return n;
}

static irqreturn_t bt1_apb_isr(int irq, void *data)
{
	struct bt1_apb *apb = data;
	u32 addr = 0;

	regmap_read(apb->regs, APB_EHB_ADDR, &addr);

	dev_crit_ratelimited(apb->dev,
		"APB-bus fault %d: Slave access timeout at 0x%08x\n",
		atomic_inc_return(&apb->count),
		addr);

	/*
	 * Print a backtrace on each CPU. This might be pointless if the
	 * fault happened on the same CPU that is executing this IRQ handler,
	 * or if the other core has already proceeded past the faulting
	 * access despite the error. Otherwise the trace leads straight to
	 * the cause of the problem.
	 */
	trigger_all_cpu_backtrace();

	regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);

	return IRQ_HANDLED;
}

static void bt1_apb_clear_data(void *data)
{
	struct bt1_apb *apb = data;
	struct platform_device *pdev = to_platform_device(apb->dev);

	platform_set_drvdata(pdev, NULL);
}

static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bt1_apb *apb;
	int ret;

	apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
	if (!apb)
		return ERR_PTR(-ENOMEM);

	ret = devm_add_action(dev, bt1_apb_clear_data, apb);
	if (ret) {
		dev_err(dev, "Can't add APB EHB data clear action\n");
		return ERR_PTR(ret);
	}

	apb->dev = dev;
	atomic_set(&apb->count, 0);
	platform_set_drvdata(pdev, apb);

	return apb;
}

static int bt1_apb_request_regs(struct bt1_apb *apb)
{
	struct platform_device *pdev = to_platform_device(apb->dev);
	void __iomem *regs;

	regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
	if (IS_ERR(regs)) {
		dev_err(apb->dev, "Couldn't map APB EHB registers\n");
		return PTR_ERR(regs);
	}

	apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
	if (IS_ERR(apb->regs)) {
		dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
		return PTR_ERR(apb->regs);
	}

	apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
	if (IS_ERR(apb->res))
		dev_err(apb->dev, "Couldn't map reserved region\n");

	return PTR_ERR_OR_ZERO(apb->res);
}

static int bt1_apb_request_rst(struct bt1_apb *apb)
{
	int ret;

	apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
	if (IS_ERR(apb->prst))
		return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
				     "Couldn't get reset control line\n");

	ret = reset_control_deassert(apb->prst);
	if (ret)
		dev_err(apb->dev, "Failed to deassert the reset line\n");

	return ret;
}

static int bt1_apb_request_clk(struct bt1_apb *apb)
{
	apb->pclk = devm_clk_get_enabled(apb->dev, "pclk");
	if (IS_ERR(apb->pclk))
		return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
				     "Couldn't get APB clock descriptor\n");

	apb->rate = clk_get_rate(apb->pclk);
	if (!apb->rate) {
		dev_err(apb->dev, "Invalid clock rate\n");
		return -EINVAL;
	}

	return 0;
}

static void bt1_apb_clear_irq(void *data)
{
	struct bt1_apb *apb = data;

	regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
}

static int bt1_apb_request_irq(struct bt1_apb *apb)
{
	struct platform_device *pdev = to_platform_device(apb->dev);
	int ret;

	apb->irq = platform_get_irq(pdev, 0);
	if (apb->irq < 0)
		return apb->irq;

	ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
			       "bt1-apb", apb);
	if (ret) {
		dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
		return ret;
	}

	ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
	if (ret) {
		dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
		return ret;
	}

	/* Unmask the IRQ and clear its pending flag. */
	regmap_update_bits(apb->regs, APB_EHB_ISR,
			   APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
			   APB_EHB_ISR_MASK);

	return 0;
}

static ssize_t count_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
}
static DEVICE_ATTR_RO(count);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);
	unsigned long timeout;
	int ret;
	u32 n;

	ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
	if (ret)
		return ret;

	timeout = bt1_apb_n_to_timeout_us(apb, n);

	return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
}

static ssize_t timeout_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);
	unsigned long timeout;
	int ret;
	u32 n;

	if (kstrtoul(buf, 0, &timeout) < 0)
		return -EINVAL;

	n = bt1_apb_timeout_to_n_us(apb, timeout);
	n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);

	ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);

	return ret ?: count;
}
static DEVICE_ATTR_RW(timeout);

static ssize_t inject_error_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
}

static ssize_t inject_error_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *data, size_t count)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);

	/*
	 * Either perform a dummy read from the unmapped address in the APB
	 * I/O area or manually set the IRQ status.
	 */
	if (sysfs_streq(data, "nodev"))
		readl(apb->res);
	else if (sysfs_streq(data, "irq"))
		regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
				   APB_EHB_ISR_PENDING);
	else
		return -EINVAL;

	return count;
}
static DEVICE_ATTR_RW(inject_error);

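/*
 * The count, timeout and inject_error attributes are exposed via the
 * device sysfs directory. Illustrative usage (the exact platform-device
 * path depends on the system's device tree):
 *   cat .../count                  - number of bus faults caught so far
 *   echo 1000 > .../timeout        - set the slave access timeout, in us
 *   echo nodev > .../inject_error  - inject a fault via a dummy read
 */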
static struct attribute *bt1_apb_sysfs_attrs[] = {
	&dev_attr_count.attr,
	&dev_attr_timeout.attr,
	&dev_attr_inject_error.attr,
	NULL
};
ATTRIBUTE_GROUPS(bt1_apb_sysfs);

static void bt1_apb_remove_sysfs(void *data)
{
	struct bt1_apb *apb = data;

	device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
}

static int bt1_apb_init_sysfs(struct bt1_apb *apb)
{
	int ret;

	ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
	if (ret) {
		dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
		return ret;
	}

	ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
	if (ret)
		dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");

	return ret;
}

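/*
 * Resources requested on probe (names as used by the calls below):
 *   "ehb"   - EHB register space, accessed through a regmap
 *   "nodev" - unmapped region used for "nodev" error injection
 *   IRQ 0   - EHB error interrupt
 *   "pclk"  - APB reference clock, which also defines the timeout units
 *   "prst"  - optional APB domain reset line
 */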
static int bt1_apb_probe(struct platform_device *pdev)
{
	struct bt1_apb *apb;
	int ret;

	apb = bt1_apb_create_data(pdev);
	if (IS_ERR(apb))
		return PTR_ERR(apb);

	ret = bt1_apb_request_regs(apb);
	if (ret)
		return ret;

	ret = bt1_apb_request_rst(apb);
	if (ret)
		return ret;

	ret = bt1_apb_request_clk(apb);
	if (ret)
		return ret;

	ret = bt1_apb_request_irq(apb);
	if (ret)
		return ret;

	ret = bt1_apb_init_sysfs(apb);
	if (ret)
		return ret;

	return 0;
}

static const struct of_device_id bt1_apb_of_match[] = {
	{ .compatible = "baikal,bt1-apb" },
	{ }
};
MODULE_DEVICE_TABLE(of, bt1_apb_of_match);

static struct platform_driver bt1_apb_driver = {
	.probe = bt1_apb_probe,
	.driver = {
		.name = "bt1-apb",
		.of_match_table = bt1_apb_of_match
	}
};
module_platform_driver(bt1_apb_driver);

MODULE_AUTHOR("Serge Semin <[email protected]>");
MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
MODULE_LICENSE("GPL v2");