Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/edac/fsl_ddr_edac.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Freescale Memory Controller kernel module
4
*
5
* Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
6
* ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
7
* split out from mpc85xx_edac EDAC driver.
8
*
9
* Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
10
*
11
* Author: Dave Jiang <[email protected]>
12
*
13
* 2006-2007 (c) MontaVista Software, Inc.
14
*/
15
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/sysfs.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"
29
30
#define EDAC_MOD_STR "fsl_ddr_edac"

/* Next free EDAC controller index; bumped once per probed MC in probe(). */
static int edac_mc_idx;
33
34
/*
 * Translate a register offset to its ioremapped address.  On i.MX9 the
 * injection/error registers live in a separate "inject" region, so two
 * offset windows are redirected to inject_vbase; everything else (and all
 * other SoCs) goes through the main mc_vbase mapping.
 */
static inline void __iomem *ddr_reg_addr(struct fsl_mc_pdata *pdata, unsigned int off)
{
	if (pdata->flag == TYPE_IMX9) {
		if (off >= FSL_MC_DATA_ERR_INJECT_HI && off <= FSL_MC_ERR_SBE)
			return pdata->inject_vbase + IMX9_MC_DATA_ERR_INJECT_OFF +
			       (off - FSL_MC_DATA_ERR_INJECT_HI);

		if (off >= IMX9_MC_ERR_EN)
			return pdata->inject_vbase + (off - IMX9_MC_ERR_EN);
	}

	return pdata->mc_vbase + off;
}
45
46
static inline u32 ddr_in32(struct fsl_mc_pdata *pdata, unsigned int off)
47
{
48
void __iomem *addr = ddr_reg_addr(pdata, off);
49
50
return pdata->little_endian ? ioread32(addr) : ioread32be(addr);
51
}
52
53
/* Write a 32-bit MC register, honouring the controller's endianness. */
static inline void ddr_out32(struct fsl_mc_pdata *pdata, unsigned int off, u32 value)
{
	if (pdata->little_endian)
		iowrite32(value, ddr_reg_addr(pdata, off));
	else
		iowrite32be(value, ddr_reg_addr(pdata, off));
}
62
63
#ifdef CONFIG_EDAC_DEBUG
/************************ MC SYSFS parts ***********************************/

/* Recover the mem_ctl_info from the struct device embedded inside it. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
67
68
static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
69
struct device_attribute *mattr,
70
char *data)
71
{
72
struct mem_ctl_info *mci = to_mci(dev);
73
struct fsl_mc_pdata *pdata = mci->pvt_info;
74
return sprintf(data, "0x%08x",
75
ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_HI));
76
}
77
78
static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
79
struct device_attribute *mattr,
80
char *data)
81
{
82
struct mem_ctl_info *mci = to_mci(dev);
83
struct fsl_mc_pdata *pdata = mci->pvt_info;
84
return sprintf(data, "0x%08x",
85
ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_LO));
86
}
87
88
static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
89
struct device_attribute *mattr,
90
char *data)
91
{
92
struct mem_ctl_info *mci = to_mci(dev);
93
struct fsl_mc_pdata *pdata = mci->pvt_info;
94
return sprintf(data, "0x%08x",
95
ddr_in32(pdata, FSL_MC_ECC_ERR_INJECT));
96
}
97
98
/*
 * sysfs store: program the high 32-bit data-injection register.
 *
 * Returns @count on success, -EINVAL for input that does not start with a
 * digit, or the kstrtoul() error.  The previous code returned 0 for
 * malformed input, which makes userspace write() report zero bytes
 * consumed and typically loop forever retrying the same buffer.
 */
static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (!isdigit(*data))
		return -EINVAL;

	rc = kstrtoul(data, 0, &val);
	if (rc)
		return rc;

	ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_HI, val);
	return count;
}
117
118
/*
 * sysfs store: program the low 32-bit data-injection register.
 *
 * Returns @count on success, -EINVAL for input that does not start with a
 * digit, or the kstrtoul() error.  Returning 0 (as before) would make
 * userspace write() retry the same buffer indefinitely.
 */
static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (!isdigit(*data))
		return -EINVAL;

	rc = kstrtoul(data, 0, &val);
	if (rc)
		return rc;

	ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_LO, val);
	return count;
}
137
138
/*
 * sysfs store: program the ECC error-injection control register.
 *
 * Returns @count on success, -EINVAL for input that does not start with a
 * digit, or the kstrtoul() error.  Returning 0 (as before) would make
 * userspace write() retry the same buffer indefinitely.
 */
static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (!isdigit(*data))
		return -EINVAL;

	rc = kstrtoul(data, 0, &val);
	if (rc)
		return rc;

	ddr_out32(pdata, FSL_MC_ECC_ERR_INJECT, val);
	return count;
}
157
158
static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
159
fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
160
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
161
fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
162
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
163
fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
164
#endif /* CONFIG_EDAC_DEBUG */
165
166
/*
 * Attributes attached to the MC device at registration time; the
 * injection knobs exist only in CONFIG_EDAC_DEBUG builds.
 */
static struct attribute *fsl_ddr_dev_attrs[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
#endif
	NULL
};

/* Generates fsl_ddr_dev_groups, passed to edac_mc_add_mc_with_groups(). */
ATTRIBUTE_GROUPS(fsl_ddr_dev);
176
177
/**************************** MC Err device ***************************/
178
179
/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual. Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
 * below correspond to Freescale's manuals.
 *
 * Even entries hold the mask for data bits [0:31] (applied to the high
 * word), odd entries the mask for [32:63] (applied to the low word).
 * Row pair i feeds bit i of the ECC byte built by calculate_ecc(); note
 * the manual labels run in the opposite order (syndrome bit 7 .. 0).
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,  /* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,  /* Syndrome bit 0 */
};
197
198
/*
199
* Calculate the correct ECC value for a 64-bit value specified by high:low
200
*/
201
static u8 calculate_ecc(u32 high, u32 low)
202
{
203
u32 mask_low;
204
u32 mask_high;
205
int bit_cnt;
206
u8 ecc = 0;
207
int i;
208
int j;
209
210
for (i = 0; i < 8; i++) {
211
mask_high = ecc_table[i * 2];
212
mask_low = ecc_table[i * 2 + 1];
213
bit_cnt = 0;
214
215
for (j = 0; j < 32; j++) {
216
if ((mask_high >> j) & 1)
217
bit_cnt ^= (high >> j) & 1;
218
if ((mask_low >> j) & 1)
219
bit_cnt ^= (low >> j) & 1;
220
}
221
222
ecc |= bit_cnt << i;
223
}
224
225
return ecc;
226
}
227
228
/*
229
* Create the syndrome code which is generated if the data line specified by
230
* 'bit' failed. Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
231
* User's Manual and 9-61 in the MPC8572 User's Manual.
232
*/
233
static u8 syndrome_from_bit(unsigned int bit) {
234
int i;
235
u8 syndrome = 0;
236
237
/*
238
* Cycle through the upper or lower 32-bit portion of each value in
239
* ecc_table depending on if 'bit' is in the upper or lower half of
240
* 64-bit data.
241
*/
242
for (i = bit < 32; i < 16; i += 2)
243
syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
244
245
return syndrome;
246
}
247
248
/*
249
* Decode data and ecc syndrome to determine what went wrong
250
* Note: This can only decode single-bit errors
251
*/
252
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
253
int *bad_data_bit, int *bad_ecc_bit)
254
{
255
int i;
256
u8 syndrome;
257
258
*bad_data_bit = -1;
259
*bad_ecc_bit = -1;
260
261
/*
262
* Calculate the ECC of the captured data and XOR it with the captured
263
* ECC to find an ECC syndrome value we can search for
264
*/
265
syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
266
267
/* Check if a data line is stuck... */
268
for (i = 0; i < 64; i++) {
269
if (syndrome == syndrome_from_bit(i)) {
270
*bad_data_bit = i;
271
return;
272
}
273
}
274
275
/* If data is correct, check ECC bits for errors... */
276
for (i = 0; i < 8; i++) {
277
if ((syndrome >> i) & 0x1) {
278
*bad_ecc_bit = i;
279
return;
280
}
281
}
282
}
283
284
/* Combine two 32-bit register halves into a single 64-bit value. */
#define make64(high, low) (((u64)(high) << 32) | (low))
285
286
/*
 * fsl_mc_check - read, decode and report latched memory-controller errors.
 *
 * Used both as the EDAC poll callback and from the ISR.  Reads the
 * error-detect register; for ECC single/multi-bit errors it captures the
 * faulting address, data and ECC, locates the chip-select row containing
 * the page, reports the event to the EDAC core, and finally clears the
 * latched error bits by writing them back (write-1-to-clear style, as the
 * non-ECC early-exit path below also does).
 */
static void fsl_mc_check(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u64 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
	if (!err_detect)
		return;

	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
		      err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = ddr_in32(pdata, FSL_MC_CAPTURE_ECC);

	/*
	 * Mask off appropriate bits of syndrome based on bus width:
	 * 8 ECC bits cover a 64-bit bus, 16 cover a 32-bit bus.
	 */
	bus_width = (ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = make64(
		ddr_in32(pdata, FSL_MC_CAPTURE_EXT_ADDRESS),
		ddr_in32(pdata, FSL_MC_CAPTURE_ADDRESS));
	pfn = err_addr >> PAGE_SHIFT;

	/* Find the chip-select row whose page range contains the fault. */
	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_HI);
	cap_low = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		u64 cap = (u64)cap_high << 32 | cap_low;
		u32 s = syndrome;

		sbe_ecc_decode(cap_high, cap_low, syndrome,
			       &bad_data_bit, &bad_ecc_bit);

		/* Flip the identified bad bit back to show expected data. */
		if (bad_data_bit >= 0) {
			fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
			cap ^= 1ULL << bad_data_bit;
		}

		if (bad_ecc_bit >= 0) {
			fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
			s ^= 1 << bad_ecc_bit;
		}

		fsl_mc_printk(mci, KERN_ERR,
			      "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			      upper_32_bits(cap), lower_32_bits(cap), s);
	}

	fsl_mc_printk(mci, KERN_ERR,
		      "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
		      cap_high, cap_low, syndrome);
	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	/* Clear the bits we just handled so new errors can latch. */
	ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
}
388
389
static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
390
{
391
struct mem_ctl_info *mci = dev_id;
392
struct fsl_mc_pdata *pdata = mci->pvt_info;
393
u32 err_detect;
394
395
err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
396
if (!err_detect)
397
return IRQ_NONE;
398
399
fsl_mc_check(mci);
400
401
return IRQ_HANDLED;
402
}
403
404
/*
 * fsl_ddr_init_csrows - describe populated chip-select rows to EDAC.
 *
 * Reads the SDRAM config register to classify the memory type (registered
 * vs unbuffered DDR generations), then walks the chip-select bounds
 * registers to fill each csrow/dimm with its page range, type and ECC
 * mode.  Rows whose bounds collapse to a single value are unpopulated.
 */
static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);

	/*
	 * SDTYPE field values below encode the DDR generation; the outer
	 * test on DSC_RD_EN selects registered vs unbuffered variants.
	 * (Encodings per the DDR_SDRAM_CFG register — see the SoC manual.)
	 */
	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_RDDR;
			break;
		case 0x03000000:
			mtype = MEM_RDDR2;
			break;
		case 0x07000000:
			mtype = MEM_RDDR3;
			break;
		case 0x05000000:
			mtype = MEM_RDDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_DDR;
			break;
		case 0x03000000:
			mtype = MEM_DDR2;
			break;
		case 0x07000000:
			mtype = MEM_DDR3;
			break;
		case 0x05000000:
			mtype = MEM_DDR4;
			break;
		case 0x04000000:
			mtype = MEM_LPDDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = mci->csrows[index];
		dimm = csrow->channels[0]->dimm;

		cs_bnds = ddr_in32(pdata, FSL_MC_CS_BNDS_0 +
				   (index * FSL_MC_CS_BNDS_OFS));

		/* Bounds register: start address in the high half, end in the low. */
		start = (cs_bnds & 0xffff0000) >> 16;
		end   = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		/*
		 * Bounds are in 16 MiB (2^24) units; convert to page frame
		 * numbers and make 'end' inclusive of its last page.
		 */
		start <<= (24 - PAGE_SHIFT);
		end   <<= (24 - PAGE_SHIFT);
		end    |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (pdata->flag == TYPE_IMX9)
			dimm->dtype = DEV_X16;
		else if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}
493
494
int fsl_mc_err_probe(struct platform_device *op)
495
{
496
struct mem_ctl_info *mci;
497
struct edac_mc_layer layers[2];
498
struct fsl_mc_pdata *pdata;
499
struct resource r;
500
u32 ecc_en_mask;
501
u32 sdram_ctl;
502
int res;
503
504
if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
505
return -ENOMEM;
506
507
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
508
layers[0].size = 4;
509
layers[0].is_virt_csrow = true;
510
layers[1].type = EDAC_MC_LAYER_CHANNEL;
511
layers[1].size = 1;
512
layers[1].is_virt_csrow = false;
513
mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
514
sizeof(*pdata));
515
if (!mci) {
516
devres_release_group(&op->dev, fsl_mc_err_probe);
517
return -ENOMEM;
518
}
519
520
pdata = mci->pvt_info;
521
pdata->name = "fsl_mc_err";
522
mci->pdev = &op->dev;
523
pdata->edac_idx = edac_mc_idx++;
524
dev_set_drvdata(mci->pdev, mci);
525
mci->ctl_name = pdata->name;
526
mci->dev_name = pdata->name;
527
528
pdata->flag = (unsigned long)device_get_match_data(&op->dev);
529
530
/*
531
* Get the endianness of DDR controller registers.
532
* Default is big endian.
533
*/
534
pdata->little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
535
536
res = of_address_to_resource(op->dev.of_node, 0, &r);
537
if (res) {
538
pr_err("%s: Unable to get resource for MC err regs\n",
539
__func__);
540
goto err;
541
}
542
543
if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
544
pdata->name)) {
545
pr_err("%s: Error while requesting mem region\n",
546
__func__);
547
res = -EBUSY;
548
goto err;
549
}
550
551
pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
552
if (!pdata->mc_vbase) {
553
pr_err("%s: Unable to setup MC err regs\n", __func__);
554
res = -ENOMEM;
555
goto err;
556
}
557
558
if (pdata->flag == TYPE_IMX9) {
559
pdata->inject_vbase = devm_platform_ioremap_resource_byname(op, "inject");
560
if (IS_ERR(pdata->inject_vbase)) {
561
res = -ENOMEM;
562
goto err;
563
}
564
}
565
566
if (pdata->flag == TYPE_IMX9) {
567
sdram_ctl = ddr_in32(pdata, IMX9_MC_ERR_EN);
568
ecc_en_mask = ERR_ECC_EN | ERR_INLINE_ECC;
569
} else {
570
sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
571
ecc_en_mask = DSC_ECC_EN;
572
}
573
574
if ((sdram_ctl & ecc_en_mask) != ecc_en_mask) {
575
/* no ECC */
576
pr_warn("%s: No ECC DIMMs discovered\n", __func__);
577
res = -ENODEV;
578
goto err;
579
}
580
581
edac_dbg(3, "init mci\n");
582
mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
583
MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
584
MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
585
MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
586
MEM_FLAG_LPDDR4;
587
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
588
mci->edac_cap = EDAC_FLAG_SECDED;
589
mci->mod_name = EDAC_MOD_STR;
590
591
if (edac_op_state == EDAC_OPSTATE_POLL)
592
mci->edac_check = fsl_mc_check;
593
594
mci->ctl_page_to_phys = NULL;
595
596
mci->scrub_mode = SCRUB_SW_SRC;
597
598
fsl_ddr_init_csrows(mci);
599
600
/* store the original error disable bits */
601
pdata->orig_ddr_err_disable = ddr_in32(pdata, FSL_MC_ERR_DISABLE);
602
ddr_out32(pdata, FSL_MC_ERR_DISABLE, 0);
603
604
/* clear all error bits */
605
ddr_out32(pdata, FSL_MC_ERR_DETECT, ~0);
606
607
res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
608
if (res) {
609
edac_dbg(3, "failed edac_mc_add_mc()\n");
610
goto err;
611
}
612
613
if (edac_op_state == EDAC_OPSTATE_INT) {
614
ddr_out32(pdata, FSL_MC_ERR_INT_EN,
615
DDR_EIE_MBEE | DDR_EIE_SBEE);
616
617
/* store the original error management threshold */
618
pdata->orig_ddr_err_sbe = ddr_in32(pdata,
619
FSL_MC_ERR_SBE) & 0xff0000;
620
621
/* set threshold to 1 error per interrupt */
622
ddr_out32(pdata, FSL_MC_ERR_SBE, 0x10000);
623
624
/* register interrupts */
625
pdata->irq = platform_get_irq(op, 0);
626
res = devm_request_irq(&op->dev, pdata->irq,
627
fsl_mc_isr,
628
IRQF_SHARED,
629
"[EDAC] MC err", mci);
630
if (res < 0) {
631
pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
632
__func__, pdata->irq);
633
res = -ENODEV;
634
goto err2;
635
}
636
637
pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
638
pdata->irq);
639
}
640
641
devres_remove_group(&op->dev, fsl_mc_err_probe);
642
edac_dbg(3, "success\n");
643
pr_info(EDAC_MOD_STR " MC err registered\n");
644
645
return 0;
646
647
err2:
648
edac_mc_del_mc(&op->dev);
649
err:
650
devres_release_group(&op->dev, fsl_mc_err_probe);
651
edac_mc_free(mci);
652
return res;
653
}
654
655
void fsl_mc_err_remove(struct platform_device *op)
656
{
657
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
658
struct fsl_mc_pdata *pdata = mci->pvt_info;
659
660
edac_dbg(0, "\n");
661
662
if (edac_op_state == EDAC_OPSTATE_INT) {
663
ddr_out32(pdata, FSL_MC_ERR_INT_EN, 0);
664
}
665
666
ddr_out32(pdata, FSL_MC_ERR_DISABLE,
667
pdata->orig_ddr_err_disable);
668
ddr_out32(pdata, FSL_MC_ERR_SBE, pdata->orig_ddr_err_sbe);
669
670
671
edac_mc_del_mc(&op->dev);
672
edac_mc_free(mci);
673
}
674
675