GitHub Repository: torvalds/linux
Path: blob/master/drivers/edac/edac_mc_sysfs.c
/*
 * edac_mc kernel module
 * (C) 2005-2007 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <[email protected]> www.softwarebitmaker.com
 *
 * (c) 2012-2013 - Mauro Carvalho Chehab
 * The entire API was rewritten and ported to use struct device
 *
 */

#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "edac_mc.h"
#include "edac_module.h"

/* MC EDAC Controls, settable by module parameter, and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static unsigned int edac_mc_poll_msec = 1000;

/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
	return edac_mc_log_ue;
}

int edac_mc_get_log_ce(void)
{
	return edac_mc_log_ce;
}

int edac_mc_get_panic_on_ue(void)
{
	return edac_mc_panic_on_ue;
}

/* this is temporary */
unsigned int edac_mc_get_poll_msec(void)
{
	return edac_mc_poll_msec;
}

static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
	unsigned int i;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtouint(val, 0, &i);
	if (ret)
		return ret;

	if (i < 1000)
		return -EINVAL;

	*((unsigned int *)kp->arg) = i;

	/* notify edac_mc engine to reset the poll period */
	edac_mc_reset_delay_period(i);

	return 0;
}

/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
		 "Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
		 "Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
		  &edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");

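/*
 * All four controls above are created with 0644 permissions, so besides
 * being set at module load time they can also be tuned at runtime through
 * /sys/module/<module>/parameters/ (typically the edac_core module).
 * Writes to edac_mc_poll_msec additionally go through edac_set_poll_msec(),
 * which rejects periods shorter than 1000 ms and pushes accepted values to
 * the polling engine via edac_mc_reset_delay_period().
 */
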
static struct device *mci_pdev;

/*
 * various constants for Memory Controllers
 */
static const char * const dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

static const char * const edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};

#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
 * EDAC sysfs CSROW data structures and methods
 */

#define to_csrow(k) container_of(k, struct csrow_info, dev)

/*
 * We need it to avoid namespace conflicts between the legacy API
 * and the per-dimm/per-rank one
 */
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)

struct dev_ch_attribute {
	struct device_attribute attr;
	unsigned int channel;
};

#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
	static struct dev_ch_attribute dev_attr_legacy_##_name = \
		{ __ATTR(_name, _mode, _show, _store), (_var) }

#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)

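/*
 * Each DEVICE_CHANNEL() instance further down embeds its channel index in a
 * struct dev_ch_attribute, so a single show/store pair can serve every
 * chN_dimm_label/chN_ce_count file and recover the channel via to_channel().
 */
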
/* Default set of csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sysfs_emit(data, "%u\n", csrow->ue_count);
}

static ssize_t csrow_ce_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sysfs_emit(data, "%u\n", csrow->ce_count);
}

static ssize_t csrow_size_show(struct device *dev,
			       struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	int i;
	u32 nr_pages = 0;

	for (i = 0; i < csrow->nr_channels; i++)
		nr_pages += csrow->channels[i]->dimm->nr_pages;
	return sysfs_emit(data, "%u\n", PAGES_TO_MiB(nr_pages));
}

static ssize_t csrow_mem_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sysfs_emit(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
}

static ssize_t csrow_dev_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sysfs_emit(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}

static ssize_t csrow_edac_mode_show(struct device *dev,
				    struct device_attribute *mattr,
				    char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sysfs_emit(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}

/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned int chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	/* if field has not been initialized, there is nothing to send */
	if (!rank->dimm->label[0])
		return 0;

	return sysfs_emit(data, "%s\n", rank->dimm->label);
}

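/*
 * DIMM label writes (here and in dimmdev_label_store()) accept an optional
 * trailing newline or NUL from userspace, strip it, and require what is left
 * to fit into the label buffer together with the terminating NUL; anything
 * else is rejected with -EINVAL.
 */
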
static ssize_t channel_dimm_label_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned int chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
		return -EINVAL;

	memcpy(rank->dimm->label, data, copy_count);
	rank->dimm->label[copy_count] = '\0';

	return count;
}

/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned int chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	return sysfs_emit(data, "%u\n", rank->ce_count);
}

/* csrow<id>/attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);

/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
	&dev_attr_legacy_dev_type.attr,
	&dev_attr_legacy_mem_type.attr,
	&dev_attr_legacy_edac_mode.attr,
	&dev_attr_legacy_size_mb.attr,
	&dev_attr_legacy_ue_count.attr,
	&dev_attr_legacy_ce_count.attr,
	NULL,
};

static const struct attribute_group csrow_attr_grp = {
	.attrs = csrow_attrs,
};

static const struct attribute_group *csrow_attr_groups[] = {
	&csrow_attr_grp,
	NULL
};

static const struct device_type csrow_attr_type = {
	.groups = csrow_attr_groups,
};

/*
 * possible dynamic channel DIMM Label attribute files
 */
DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 5);
DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 6);
DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 7);
DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 8);
DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 9);
DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 10);
DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 11);

/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
	&dev_attr_legacy_ch0_dimm_label.attr.attr,
	&dev_attr_legacy_ch1_dimm_label.attr.attr,
	&dev_attr_legacy_ch2_dimm_label.attr.attr,
	&dev_attr_legacy_ch3_dimm_label.attr.attr,
	&dev_attr_legacy_ch4_dimm_label.attr.attr,
	&dev_attr_legacy_ch5_dimm_label.attr.attr,
	&dev_attr_legacy_ch6_dimm_label.attr.attr,
	&dev_attr_legacy_ch7_dimm_label.attr.attr,
	&dev_attr_legacy_ch8_dimm_label.attr.attr,
	&dev_attr_legacy_ch9_dimm_label.attr.attr,
	&dev_attr_legacy_ch10_dimm_label.attr.attr,
	&dev_attr_legacy_ch11_dimm_label.attr.attr,
	NULL
};

/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 5);
DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 6);
DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 7);
DEVICE_CHANNEL(ch8_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 8);
DEVICE_CHANNEL(ch9_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 9);
DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 10);
DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
	channel_ce_count_show, NULL, 11);

/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
	&dev_attr_legacy_ch0_ce_count.attr.attr,
	&dev_attr_legacy_ch1_ce_count.attr.attr,
	&dev_attr_legacy_ch2_ce_count.attr.attr,
	&dev_attr_legacy_ch3_ce_count.attr.attr,
	&dev_attr_legacy_ch4_ce_count.attr.attr,
	&dev_attr_legacy_ch5_ce_count.attr.attr,
	&dev_attr_legacy_ch6_ce_count.attr.attr,
	&dev_attr_legacy_ch7_ce_count.attr.attr,
	&dev_attr_legacy_ch8_ce_count.attr.attr,
	&dev_attr_legacy_ch9_ce_count.attr.attr,
	&dev_attr_legacy_ch10_ce_count.attr.attr,
	&dev_attr_legacy_ch11_ce_count.attr.attr,
	NULL
};

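/*
 * csrow_dev_is_visible() relies on the attribute index matching the channel
 * number in the two tables above: chN_* files are only created for channels
 * that exist on the csrow and have a populated DIMM behind them.
 */
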
static umode_t csrow_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	if (idx >= csrow->nr_channels)
		return 0;

	if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
		WARN_ONCE(1, "idx: %d\n", idx);
		return 0;
	}

	/* Only expose populated DIMMs */
	if (!csrow->channels[idx]->dimm->nr_pages)
		return 0;

	return attr->mode;
}


static const struct attribute_group csrow_dev_dimm_group = {
	.attrs = dynamic_csrow_dimm_attr,
	.is_visible = csrow_dev_is_visible,
};

static const struct attribute_group csrow_dev_ce_count_group = {
	.attrs = dynamic_csrow_ce_count_attr,
	.is_visible = csrow_dev_is_visible,
};

static const struct attribute_group *csrow_dev_groups[] = {
	&csrow_dev_dimm_group,
	&csrow_dev_ce_count_group,
	NULL
};

static void csrow_release(struct device *dev)
{
	/*
	 * Nothing to do, just unregister sysfs here. The mci
	 * device owns the data and will also release it.
	 */
}

static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
	int chan, nr_pages = 0;

	for (chan = 0; chan < csrow->nr_channels; chan++)
		nr_pages += csrow->channels[chan]->dimm->nr_pages;

	return nr_pages;
}

/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
				    struct csrow_info *csrow, int index)
{
	int err;

	csrow->dev.type = &csrow_attr_type;
	csrow->dev.groups = csrow_dev_groups;
	csrow->dev.release = csrow_release;
	device_initialize(&csrow->dev);
	csrow->dev.parent = &mci->dev;
	csrow->mci = mci;
	dev_set_name(&csrow->dev, "csrow%d", index);
	dev_set_drvdata(&csrow->dev, csrow);

	err = device_add(&csrow->dev);
	if (err) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
		put_device(&csrow->dev);
		return err;
	}

	edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));

	return 0;
}

/* Create the CSROW objects under the specified edac_mc device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
	int err, i;
	struct csrow_info *csrow;

	for (i = 0; i < mci->nr_csrows; i++) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		err = edac_create_csrow_object(mci, mci->csrows[i], i);
		if (err < 0)
			goto error;
	}
	return 0;

error:
	for (--i; i >= 0; i--) {
		if (device_is_registered(&mci->csrows[i]->dev))
			device_unregister(&mci->csrows[i]->dev);
	}

	return err;
}

static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
	int i;

	for (i = 0; i < mci->nr_csrows; i++) {
		if (device_is_registered(&mci->csrows[i]->dev))
			device_unregister(&mci->csrows[i]->dev);
	}
}

#endif

/*
 * Per-dimm (or per-rank) devices
 */

#define to_dimm(k) container_of(k, struct dimm_info, dev)

/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);
	ssize_t count;

	count = edac_dimm_info_location(dimm, data, PAGE_SIZE);
	count += scnprintf(data + count, PAGE_SIZE - count, "\n");

	return count;
}

static ssize_t dimmdev_label_show(struct device *dev,
				  struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* if field has not been initialized, there is nothing to send */
	if (!dimm->label[0])
		return 0;

	return sysfs_emit(data, "%s\n", dimm->label);
}

static ssize_t dimmdev_label_store(struct device *dev,
				   struct device_attribute *mattr,
				   const char *data,
				   size_t count)
{
	struct dimm_info *dimm = to_dimm(dev);
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(dimm->label))
		return -EINVAL;

	memcpy(dimm->label, data, copy_count);
	dimm->label[copy_count] = '\0';

	return count;
}

static ssize_t dimmdev_size_show(struct device *dev,
				 struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sysfs_emit(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}

static ssize_t dimmdev_mem_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sysfs_emit(data, "%s\n", edac_mem_types[dimm->mtype]);
}

static ssize_t dimmdev_dev_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sysfs_emit(data, "%s\n", dev_types[dimm->dtype]);
}

static ssize_t dimmdev_edac_mode_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sysfs_emit(data, "%s\n", edac_caps[dimm->edac_mode]);
}

static ssize_t dimmdev_ce_count_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sysfs_emit(data, "%u\n", dimm->ce_count);
}

static ssize_t dimmdev_ue_count_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sysfs_emit(data, "%u\n", dimm->ue_count);
}

/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
		   dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
static DEVICE_ATTR(dimm_ce_count, S_IRUGO, dimmdev_ce_count_show, NULL);
static DEVICE_ATTR(dimm_ue_count, S_IRUGO, dimmdev_ue_count_show, NULL);

/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
	&dev_attr_dimm_label.attr,
	&dev_attr_dimm_location.attr,
	&dev_attr_size.attr,
	&dev_attr_dimm_mem_type.attr,
	&dev_attr_dimm_dev_type.attr,
	&dev_attr_dimm_edac_mode.attr,
	&dev_attr_dimm_ce_count.attr,
	&dev_attr_dimm_ue_count.attr,
	NULL,
};

static const struct attribute_group dimm_attr_grp = {
	.attrs = dimm_attrs,
};

static const struct attribute_group *dimm_attr_groups[] = {
	&dimm_attr_grp,
	NULL
};

static const struct device_type dimm_attr_type = {
	.groups = dimm_attr_groups,
};

static void dimm_release(struct device *dev)
{
	/*
	 * Nothing to do, just unregister sysfs here. The mci
	 * device owns the data and will also release it.
	 */
}

/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
				   struct dimm_info *dimm)
{
	int err;
	dimm->mci = mci;

	dimm->dev.type = &dimm_attr_type;
	dimm->dev.release = dimm_release;
	device_initialize(&dimm->dev);

	dimm->dev.parent = &mci->dev;
	if (mci->csbased)
		dev_set_name(&dimm->dev, "rank%d", dimm->idx);
	else
		dev_set_name(&dimm->dev, "dimm%d", dimm->idx);
	dev_set_drvdata(&dimm->dev, dimm);
	pm_runtime_forbid(&mci->dev);

	err = device_add(&dimm->dev);
	if (err) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
		put_device(&dimm->dev);
		return err;
	}

	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
		char location[80];

		edac_dimm_info_location(dimm, location, sizeof(location));
		edac_dbg(0, "device %s created at location %s\n",
			 dev_name(&dimm->dev), location);
	}

	return 0;
}

/*
 * Memory controller device
 */

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

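/*
 * Writing anything to reset_counters clears the MC-, csrow-, channel- and
 * dimm-level error counters and restarts the seconds_since_reset clock.
 */
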
static ssize_t mci_reset_counters_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct dimm_info *dimm;
	int row, chan;

	mci->ue_mc = 0;
	mci->ce_mc = 0;
	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;

		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan]->ce_count = 0;
	}

	mci_for_each_dimm(mci, dimm) {
		dimm->ue_count = 0;
		dimm->ce_count = 0;
	}

	mci->start_time = jiffies;
	return count;
}

/* Memory scrubbing interface:
 *
 * An MC driver can limit the scrubbing bandwidth based on the CPU type.
 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
 *
 * A negative value still means that an error has occurred while setting
 * the scrub rate.
 */
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	unsigned long bandwidth = 0;
	int new_bw = 0;

	if (kstrtoul(data, 10, &bandwidth) < 0)
		return -EINVAL;

	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
	if (new_bw < 0) {
		edac_printk(KERN_WARNING, EDAC_MC,
			    "Error setting scrub rate to: %lu\n", bandwidth);
		return -EINVAL;
	}

	return count;
}

/*
 * ->get_sdram_scrub_rate() return value semantics same as above.
 */
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int bandwidth = 0;

	bandwidth = mci->get_sdram_scrub_rate(mci);
	if (bandwidth < 0) {
		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
		return bandwidth;
	}

	return sysfs_emit(data, "%d\n", bandwidth);
}

/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sysfs_emit(data, "%u\n", mci->ue_mc);
}

static ssize_t mci_ce_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sysfs_emit(data, "%u\n", mci->ce_mc);
}

static ssize_t mci_ce_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sysfs_emit(data, "%u\n", mci->ce_noinfo_count);
}

static ssize_t mci_ue_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sysfs_emit(data, "%u\n", mci->ue_noinfo_count);
}

static ssize_t mci_seconds_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sysfs_emit(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}

static ssize_t mci_ctl_name_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sysfs_emit(data, "%s\n", mci->ctl_name);
}

static ssize_t mci_size_mb_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int total_pages = 0, csrow_idx, j;

	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
		struct csrow_info *csrow = mci->csrows[csrow_idx];

		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			total_pages += dimm->nr_pages;
		}
	}

	return sysfs_emit(data, "%u\n", PAGES_TO_MiB(total_pages));
}

static ssize_t mci_max_location_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int len = PAGE_SIZE;
	char *p = data;
	int i, n;

	for (i = 0; i < mci->n_layers; i++) {
		n = scnprintf(p, len, "%s %d ",
			      edac_layer_name[mci->layers[i].type],
			      mci->layers[i].size - 1);
		len -= n;
		if (len <= 0)
			goto out;

		p += n;
	}

	p += scnprintf(p, len, "\n");
out:
	return p - data;
}

/* default Control file */
static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);

/* default Attribute files */
static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);

/* memory scrubber attribute file */
static DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
		   mci_sdram_scrub_rate_store); /* umode set later in is_visible */

static struct attribute *mci_attrs[] = {
	&dev_attr_reset_counters.attr,
	&dev_attr_mc_name.attr,
	&dev_attr_size_mb.attr,
	&dev_attr_seconds_since_reset.attr,
	&dev_attr_ue_noinfo_count.attr,
	&dev_attr_ce_noinfo_count.attr,
	&dev_attr_ue_count.attr,
	&dev_attr_ce_count.attr,
	&dev_attr_max_location.attr,
	&dev_attr_sdram_scrub_rate.attr,
	NULL
};

static umode_t mci_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = to_mci(dev);
	umode_t mode = 0;

	if (attr != &dev_attr_sdram_scrub_rate.attr)
		return attr->mode;
	if (mci->get_sdram_scrub_rate)
		mode |= S_IRUGO;
	if (mci->set_sdram_scrub_rate)
		mode |= S_IWUSR;
	return mode;
}

static const struct attribute_group mci_attr_grp = {
	.attrs = mci_attrs,
	.is_visible = mci_attr_is_visible,
};

static const struct attribute_group *mci_attr_groups[] = {
	&mci_attr_grp,
	NULL
};

static const struct device_type mci_attr_type = {
	.groups = mci_attr_groups,
};

/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
				 const struct attribute_group **groups)
{
	struct dimm_info *dimm;
	int err;

	/* get the /sys/devices/system/edac subsys reference */
	mci->dev.type = &mci_attr_type;
	mci->dev.parent = mci_pdev;
	mci->dev.groups = groups;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		/* no put_device() here, free mci with _edac_mc_free() */
		return err;
	}

	edac_dbg(0, "device %s created\n", dev_name(&mci->dev));

	/*
	 * Create the dimm/rank devices
	 */
	mci_for_each_dimm(mci, dimm) {
		/* Only expose populated DIMMs */
		if (!dimm->nr_pages)
			continue;

		err = edac_create_dimm_object(mci, dimm);
		if (err)
			goto fail;
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail;
#endif

	edac_create_debugfs_nodes(mci);
	return 0;

fail:
	edac_remove_sysfs_mci_device(mci);

	return err;
}

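/*
 * The resulting hierarchy (assuming the edac subsystem is rooted at
 * /sys/devices/system/edac, as set up by edac_mc_sysfs_init() below) is
 * roughly:
 *
 *	.../edac/mc/mc<N>/dimm<M>   (or rank<M> for csrow-based controllers)
 *	.../edac/mc/mc<N>/csrow<R>  (only with CONFIG_EDAC_LEGACY_SYSFS)
 */
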
/*
 * remove a Memory Controller instance
 */
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
	struct dimm_info *dimm;

	if (!device_is_registered(&mci->dev))
		return;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	edac_debugfs_remove_recursive(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
	edac_delete_csrow_objects(mci);
#endif

	mci_for_each_dimm(mci, dimm) {
		if (!device_is_registered(&dimm->dev))
			continue;
		edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
		device_unregister(&dimm->dev);
	}

	/* only remove the device, but keep mci */
	device_del(&mci->dev);
}

static void mc_attr_release(struct device *dev)
{
	/*
	 * There's no container structure here, as this is just the mci
	 * parent device, used to create the /sys/devices/system/edac/mc
	 * sysfs node. So, there are no attributes on it.
	 */
	edac_dbg(1, "device %s released\n", dev_name(dev));
	kfree(dev);
}

/*
 * Init/exit code for the module. Basically, creates/removes the 'mc'
 * device under the edac sysfs subsystem.
 */
int __init edac_mc_sysfs_init(void)
{
	int err;

	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
	if (!mci_pdev)
		return -ENOMEM;

	mci_pdev->bus = edac_get_sysfs_subsys();
	mci_pdev->release = mc_attr_release;
	mci_pdev->init_name = "mc";

	err = device_register(mci_pdev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
		put_device(mci_pdev);
		return err;
	}

	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));

	return 0;
}

void edac_mc_sysfs_exit(void)
{
	device_unregister(mci_pdev);
}