GitHub Repository: torvalds/linux
Path: blob/master/drivers/edac/armada_xp_edac.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Pengutronix, Jan Luebbe <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>

#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"

/************************ EDAC MC (DDR RAM) ********************************/

#define SDRAM_NUM_CS 4

#define SDRAM_CONFIG_REG 0x0
#define SDRAM_CONFIG_ECC_MASK BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15)

#define SDRAM_ADDR_CTRL_REG 0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs)
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs)
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))

#define SDRAM_ERR_DATA_H_REG 0x40
#define SDRAM_ERR_DATA_L_REG 0x44

#define SDRAM_ERR_RECV_ECC_REG 0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_CALC_ECC_REG 0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_ADDR_REG 0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET 23
#define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET 8
#define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET 1
#define SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK BIT(0)

#define SDRAM_ERR_CTRL_REG 0x54
#define SDRAM_ERR_CTRL_THR_OFFSET 16
#define SDRAM_ERR_CTRL_THR_MASK (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK BIT(9)

#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c

#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK BIT(0)

#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)

struct axp_mc_drvdata {
	void __iomem *base;
	/* width in bytes */
	unsigned int width;
	/* bank interleaving */
	bool cs_addr_sel[SDRAM_NUM_CS];

	char msg[128];
};

/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
				    uint8_t cs, uint8_t bank, uint16_t row,
				    uint16_t col)
{
	if (drvdata->width == 8) {
		/* 64 bit */
		if (drvdata->cs_addr_sel[cs])
			/* bank interleaved */
			return (((row & 0xfff8) << 16) |
				((bank & 0x7) << 16) |
				((row & 0x7) << 13) |
				((col & 0x3ff) << 3));
		else
			return (((row & 0xffff << 16) |
				((bank & 0x7) << 13) |
				((col & 0x3ff)) << 3));
	} else if (drvdata->width == 4) {
		/* 32 bit */
		if (drvdata->cs_addr_sel[cs])
			/* bank interleaved */
			return (((row & 0xfff0) << 15) |
				((bank & 0x7) << 16) |
				((row & 0xf) << 12) |
				((col & 0x3ff) << 2));
		else
			return (((row & 0xffff << 15) |
				((bank & 0x7) << 12) |
				((col & 0x3ff)) << 2));
	} else {
		/* 16 bit */
		if (drvdata->cs_addr_sel[cs])
			/* bank interleaved */
			return (((row & 0xffe0) << 14) |
				((bank & 0x7) << 16) |
				((row & 0x1f) << 11) |
				((col & 0x3ff) << 1));
		else
			return (((row & 0xffff << 14) |
				((bank & 0x7) << 11) |
				((col & 0x3ff)) << 1));
	}
}
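
/*
 * Illustration (derived from the bit masks above, not taken from the
 * functional spec): for a 64-bit bus with bank interleaving enabled on the
 * chip select, axp_mc_calc_address() lays the address out as
 *   addr[12:3]  = col[9:0]
 *   addr[15:13] = row[2:0]
 *   addr[18:16] = bank[2:0]
 *   addr[31:19] = row[15:3]
 * The narrower bus widths use the same scheme with the column and low row
 * fields shifted down accordingly.
 */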

static void axp_mc_check(struct mem_ctl_info *mci)
{
	struct axp_mc_drvdata *drvdata = mci->pvt_info;
	uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
	uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
	uint32_t row_val, col_val, bank_val, addr_val;
	uint8_t syndrome_val, cs_val;
	char *msg = drvdata->msg;

	data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
	data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
	recv_ecc = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
	calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
	addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
	cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
	cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
	cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
	cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

	/* clear cause registers */
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
	       drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
	       drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

	/* clear error counter registers */
	if (cnt_sbe)
		writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
	if (cnt_dbe)
		writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

	if (!cnt_sbe && !cnt_dbe)
		return;

	if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
		if (cnt_sbe)
			cnt_sbe--;
		else
			dev_warn(mci->pdev, "inconsistent SBE count detected\n");
	} else {
		if (cnt_dbe)
			cnt_dbe--;
		else
			dev_warn(mci->pdev, "inconsistent DBE count detected\n");
	}
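
	/*
	 * The most recent error was dropped from the counts above because it
	 * is reported separately below, with a decoded address and syndrome.
	 */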
	/* report earlier errors */
	if (cnt_sbe)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     cnt_sbe, /* error count */
				     0, 0, 0, /* pfn, offset, syndrome */
				     -1, -1, -1, /* top, mid, low layer */
				     mci->ctl_name,
				     "details unavailable (multiple errors)");
	if (cnt_dbe)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     cnt_dbe, /* error count */
				     0, 0, 0, /* pfn, offset, syndrome */
				     -1, -1, -1, /* top, mid, low layer */
				     mci->ctl_name,
				     "details unavailable (multiple errors)");

	/* report details for most recent error */
	cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
	bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
	row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
	col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
	syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
	addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
				       col_val);
	msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
	msg += sprintf(msg, "bank=0x%x ", bank_val); /* 9 chars */
	msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
	msg += sprintf(msg, "cs=%d", cs_val); /* 4 chars */

	if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     1, /* error count */
				     addr_val >> PAGE_SHIFT,
				     addr_val & ~PAGE_MASK,
				     syndrome_val,
				     cs_val, -1, -1, /* top, mid, low layer */
				     mci->ctl_name, drvdata->msg);
	} else {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     1, /* error count */
				     addr_val >> PAGE_SHIFT,
				     addr_val & ~PAGE_MASK,
				     syndrome_val,
				     cs_val, -1, -1, /* top, mid, low layer */
				     mci->ctl_name, drvdata->msg);
	}
}

static void axp_mc_read_config(struct mem_ctl_info *mci)
{
	struct axp_mc_drvdata *drvdata = mci->pvt_info;
	uint32_t config, addr_ctrl, rank_ctrl;
	unsigned int i, cs_struct, cs_size;
	struct dimm_info *dimm;

	config = readl(drvdata->base + SDRAM_CONFIG_REG);
	if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
		/* 64 bit */
		drvdata->width = 8;
	else
		/* 32 bit */
		drvdata->width = 4;

	addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
	rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);
	for (i = 0; i < SDRAM_NUM_CS; i++) {
		dimm = mci->dimms[i];

		if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
			continue;

		drvdata->cs_addr_sel[i] =
			!!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));

		cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
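		/*
		 * The per-CS size is a 3-bit value split across the register:
		 * one high bit at position 20+cs and a 2-bit low field at
		 * position cs*4+2.  The high bit is shifted down to bit 2 and
		 * combined with the low field to form the index used below.
		 */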
		cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
			   ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));

		switch (cs_size) {
		case 0: /* 2GBit */
			dimm->nr_pages = 524288;
			break;
		case 1: /* 256MBit */
			dimm->nr_pages = 65536;
			break;
		case 2: /* 512MBit */
			dimm->nr_pages = 131072;
			break;
		case 3: /* 1GBit */
			dimm->nr_pages = 262144;
			break;
		case 4: /* 4GBit */
			dimm->nr_pages = 1048576;
			break;
		case 5: /* 8GBit */
			dimm->nr_pages = 2097152;
			break;
		}
		dimm->grain = 8;
		dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
		dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
				MEM_RDDR3 : MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
	}
}

static const struct of_device_id axp_mc_of_match[] = {
	{.compatible = "marvell,armada-xp-sdram-controller",},
	{},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);

static int axp_mc_probe(struct platform_device *pdev)
{
	struct axp_mc_drvdata *drvdata;
	struct edac_mc_layer layers[1];
	const struct of_device_id *id;
	struct mem_ctl_info *mci;
	void __iomem *base;
	uint32_t config;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_err(&pdev->dev, "Unable to map regs\n");
		return PTR_ERR(base);
	}

	config = readl(base + SDRAM_CONFIG_REG);
	if (!(config & SDRAM_CONFIG_ECC_MASK)) {
		dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
		return -EINVAL;
	}

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = SDRAM_NUM_CS;
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
	if (!mci)
		return -ENOMEM;

	drvdata = mci->pvt_info;
	drvdata->base = base;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);

	id = of_match_device(axp_mc_of_match, &pdev->dev);
	mci->edac_check = axp_mc_check;
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = pdev->dev.driver->name;
	mci->ctl_name = id ? id->compatible : "unknown";
	mci->dev_name = dev_name(&pdev->dev);
	mci->scrub_mode = SCRUB_NONE;

	axp_mc_read_config(mci);

	/* These SoCs have a reduced width bus */
	if (of_machine_is_compatible("marvell,armada380") ||
	    of_machine_is_compatible("marvell,armadaxp-98dx3236"))
		drvdata->width /= 2;

	/* configure SBE threshold */
	/* it seems that SBEs are not captured otherwise */
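	/*
	 * The threshold field occupies bits 23:16; programming it to 1
	 * apparently makes the controller latch every single-bit error.
	 */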
	writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);

	/* clear cause registers */
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

	/* clear counter registers */
	writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
	writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);
		return -EINVAL;
	}
	edac_op_state = EDAC_OPSTATE_POLL;

	return 0;
}

static void axp_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	platform_set_drvdata(pdev, NULL);
}

static struct platform_driver axp_mc_driver = {
	.probe = axp_mc_probe,
	.remove = axp_mc_remove,
	.driver = {
		.name = "armada_xp_mc_edac",
		.of_match_table = of_match_ptr(axp_mc_of_match),
	},
};

/************************ EDAC Device (L2 Cache) ***************************/

struct aurora_l2_drvdata {
	void __iomem *base;

	char msg[128];

	/* error injection via debugfs */
	uint32_t inject_addr;
	uint32_t inject_mask;
	uint8_t inject_ctl;

	struct dentry *debugfs;
};

#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
	drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
	drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
	writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
	writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
	writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif
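
/*
 * Usage sketch for the injection interface (paths are an assumption; the
 * debugfs directory is created from dev_name() under the EDAC debugfs root,
 * typically /sys/kernel/debug/edac/<dev>):
 *   echo 0x10000 > /sys/kernel/debug/edac/<dev>/inject_addr
 *   echo 0x1     > /sys/kernel/debug/edac/<dev>/inject_mask
 *   echo 1       > /sys/kernel/debug/edac/<dev>/inject_ctl
 * The values are written to the hardware on the next poll, when
 * aurora_l2_poll() calls aurora_l2_inject().
 */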

static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
	struct aurora_l2_drvdata *drvdata = dci->pvt_info;
	uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
	unsigned int cnt_ce, cnt_ue;
	char *msg = drvdata->msg;
	size_t size = sizeof(drvdata->msg);
	size_t len = 0;

	cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
	attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
	addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
	way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);

	cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
	cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
	/* clear error counter registers */
	if (cnt_ce || cnt_ue)
		writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);

	if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
		goto clear_remaining;

	src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
	if (src <= 3)
		len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
	else
		len += scnprintf(msg+len, size-len, "src=IO ");

	txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
	switch (txn) {
	case 0:
		len += scnprintf(msg+len, size-len, "txn=Data-Read ");
		break;
	case 1:
		len += scnprintf(msg+len, size-len, "txn=Isn-Read ");
		break;
	case 2:
		len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
		break;
	case 3:
		len += scnprintf(msg+len, size-len, "txn=Eviction ");
		break;
	case 4:
		len += scnprintf(msg+len, size-len,
				 "txn=Read-Modify-Write ");
		break;
	}

	err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
	switch (err) {
	case 0:
		len += scnprintf(msg+len, size-len, "err=CorrECC ");
		break;
	case 1:
		len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
		break;
	case 2:
		len += scnprintf(msg+len, size-len, "err=TagParity ");
		break;
	}

	len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
	len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
	len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);

	/* clear error capture registers */
	writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
	if (err) {
		/* UnCorrECC or TagParity */
		if (cnt_ue)
			cnt_ue--;
		edac_device_handle_ue(dci, 0, 0, drvdata->msg);
	} else {
		if (cnt_ce)
			cnt_ce--;
		edac_device_handle_ce(dci, 0, 0, drvdata->msg);
	}

clear_remaining:
	/* report remaining errors */
	while (cnt_ue--)
		edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
	while (cnt_ce--)
		edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}

static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
	struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif

	aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
	aurora_l2_inject(drvdata);
#endif
}

static const struct of_device_id aurora_l2_of_match[] = {
	{.compatible = "marvell,aurora-system-cache",},
	{},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);

static int aurora_l2_probe(struct platform_device *pdev)
{
	struct aurora_l2_drvdata *drvdata;
	struct edac_device_ctl_info *dci;
	const struct of_device_id *id;
	uint32_t l2x0_aux_ctrl;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_err(&pdev->dev, "Unable to map regs\n");
		return PTR_ERR(base);
	}

	l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
	if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
		dev_warn(&pdev->dev, "tag parity is not enabled\n");
	if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
		dev_warn(&pdev->dev, "data ECC is not enabled\n");
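
	/*
	 * One EDAC device instance named "cpu" with a single block; the block
	 * name "L" together with the offset value 2 makes the EDAC core
	 * report the block as "L2" in sysfs.
	 */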
	dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
					 "cpu", 1, "L", 1, 2, 0);
	if (!dci)
		return -ENOMEM;

	drvdata = dci->pvt_info;
	drvdata->base = base;
	dci->dev = &pdev->dev;
	platform_set_drvdata(pdev, dci);

	id = of_match_device(aurora_l2_of_match, &pdev->dev);
	dci->edac_check = aurora_l2_poll;
	dci->mod_name = pdev->dev.driver->name;
	dci->ctl_name = id ? id->compatible : "unknown";
	dci->dev_name = dev_name(&pdev->dev);

	/* clear registers */
	writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
	writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

	if (edac_device_add_device(dci)) {
		edac_device_free_ctl_info(dci);
		return -EINVAL;
	}

#ifdef CONFIG_EDAC_DEBUG
	drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
	if (drvdata->debugfs) {
		edac_debugfs_create_x32("inject_addr", 0644,
					drvdata->debugfs,
					&drvdata->inject_addr);
		edac_debugfs_create_x32("inject_mask", 0644,
					drvdata->debugfs,
					&drvdata->inject_mask);
		edac_debugfs_create_x8("inject_ctl", 0644,
				       drvdata->debugfs, &drvdata->inject_ctl);
	}
#endif

	return 0;
}

static void aurora_l2_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
	struct aurora_l2_drvdata *drvdata = dci->pvt_info;

	edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(dci);
	platform_set_drvdata(pdev, NULL);
}

static struct platform_driver aurora_l2_driver = {
	.probe = aurora_l2_probe,
	.remove = aurora_l2_remove,
	.driver = {
		.name = "aurora_l2_edac",
		.of_match_table = of_match_ptr(aurora_l2_of_match),
	},
};

/************************ Driver registration ******************************/

static struct platform_driver * const drivers[] = {
	&axp_mc_driver,
	&aurora_l2_driver,
};

static int __init armada_xp_edac_init(void)
{
	int res;
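
	/* defer to firmware-first (GHES) error reporting when it is in use */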
	if (ghes_get_devices())
		return -EBUSY;

	/* only polling is supported */
	edac_op_state = EDAC_OPSTATE_POLL;

	res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (res)
		pr_warn("Armada XP EDAC drivers fail to register\n");

	return 0;
}
module_init(armada_xp_edac_init);

static void __exit armada_xp_edac_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");