GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/kernel/cpu/mcheck/mce_amd.c

/*
 * (c) 2005, 2006 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support : [email protected]
 *
 * April 2006
 *     - added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
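
/*
 * Layout of the high 32 bits of a threshold register, as implied by the
 * masks above (bit positions within the high word):
 *
 *      bit  31         MASK_VALID_HI           threshold register valid
 *      bit  30         MASK_CNTP_HI            counter present
 *      bit  29         MASK_LOCKED_HI          locked, must not be touched
 *      bits 23-20      MASK_LVTOFF_HI          APIC LVT offset
 *      bit  19         MASK_COUNT_EN_HI        counter enable
 *      bits 18-17      MASK_INT_TYPE_HI        interrupt type (INT_TYPE_APIC)
 *      bit  16         MASK_OVERFLOW_HI        error counter overflow
 *      bits 11-0       MASK_ERR_COUNT_HI       12-bit error counter
 *
 * The counter is programmed to THRESHOLD_MAX - threshold_limit, counts
 * up, and raises the threshold interrupt on overflow (see
 * threshold_restart_bank() below).
 */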

struct threshold_block {
        unsigned int            block;          /* # within the bank */
        unsigned int            bank;
        unsigned int            cpu;
        u32                     address;        /* MSR address of the block */
        u16                     interrupt_enable;
        u16                     threshold_limit;
        struct kobject          kobj;
        struct list_head        miscj;          /* sibling blocks of the bank */
};

struct threshold_bank {
        struct kobject          *kobj;
        struct threshold_block  *blocks;
        cpumask_var_t           cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
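
/*
 * threshold_banks is a per-CPU array of NR_BANKS pointers.  Each
 * threshold_bank owns the sysfs objects for one machine check bank:
 * its threshold_blocks are chained through ->miscj, and ->cpus records
 * which CPUs share the bank.  Banks flagged in shared_bank[] below are
 * allocated once, by the first core that sees them; the sibling cores
 * only get sysfs symlinks (see threshold_create_bank()).
 */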

#ifdef CONFIG_SMP
/* bank 4 (MC4) is shared between cores; see the header comment */
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;          /* reset err count and overflow bit */
        int                     set_lvt_off;    /* (re)program the APIC LVT offset */
        int                     lvt_off;
        u16                     old_limit;      /* previous limit, for adjusting the count */
};

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
}

/* must be called with correct cpu affinity */
/* Called via smp_call_function_single() */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi = (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                     (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                                (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                     (new_count & THRESHOLD_MAX);
        }

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC;
        else
                hi &= ~MASK_INT_TYPE_HI;

        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b              = b,
                .set_lvt_off    = 1,
                .lvt_off        = offset,
        };

        b->threshold_limit = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
}

static int setup_APIC_mce(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        struct threshold_block b;
        unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        int offset = -1;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;

                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
#endif
                        offset = setup_APIC_mce(offset,
                                                (high & MASK_LVTOFF_HI) >> 20);

                        memset(&b, 0, sizeof(b));
                        b.cpu           = cpu;
                        b.bank          = bank;
                        b.block         = block;
                        b.address       = address;

                        mce_threshold_block_init(&b, offset);
                        mce_threshold_vector = amd_threshold_interrupt;
                }
        }
}
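
/*
 * MSR addressing used above: block 0 of bank N lives at
 * MSR_IA32_MC0_MISC + N * 4.  Bits 31-24 of its low word
 * (MASK_BLKPTR_LO) hold a block pointer; the ">> 21" scales that 8-bit
 * value by 8, yielding the offset of the bank's extended block area
 * relative to MCG_XBLK_ADDR.  Blocks 1..8 then sit at consecutive MSRs
 * from there.
 */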

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.  The
 * interrupt fires when error_count reaches threshold_limit, and the
 * handler simply logs an mcelog entry with a software-defined bank
 * number.
 */
static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct mce m;

        mce_setup(&m);

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0) {
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        } else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else {
                                ++address;
                        }

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
                                           &__get_cpu_var(mce_poll_banks));

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                         + bank * NR_BLOCKS
                                         + block;
                                mce_log(&m);
                                return;
                        }
                }
        }
}
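
/*
 * The bank number logged above is synthetic: K8_MCE_THRESHOLD_BASE +
 * bank * NR_BLOCKS + block, so userspace (mcelog) can tell threshold
 * events apart from ordinary machine check banks.
 */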

/*
 * Sysfs Interface
 */
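
/*
 * The kobjects created below hang off the per-CPU mce_dev sysdev, so
 * the attributes typically appear as
 *   /sys/devices/system/machinecheck/machinecheck<cpu>/
 *       threshold_bank<bank>/misc<block>/{interrupt_enable,
 *       threshold_limit,error_count}
 * (the exact path depends on where the machinecheck sysdev class is
 * rooted).
 */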

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);         \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

struct threshold_block_cross_cpu {
        struct threshold_block  *tb;
        long                    retval;
};

static void local_error_count_handler(void *_tbcc)
{
        struct threshold_block_cross_cpu *tbcc = _tbcc;
        struct threshold_block *b = tbcc->tb;
        u32 low, high;

        rdmsr(b->address, low, high);
        tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        struct threshold_block_cross_cpu tbcc = { .tb = b, };

        smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
        return sprintf(buf, "%lx\n", tbcc.retval);
}

static ssize_t store_error_count(struct threshold_block *b,
                                 const char *buf, size_t count)
{
        struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
        return 1;
}
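
/*
 * error_count reads the live register on the owning CPU and subtracts
 * the counter's programmed start value (THRESHOLD_MAX -
 * threshold_limit) from the current count.  Writing anything to
 * error_count just requests a reset (tr.reset = 1); the written value
 * itself is ignored.
 */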

#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                   \
        .attr   = {.name = __stringify(val), .mode = 0644 },           \
        .show   = show_## val,                                          \
        .store  = store_## val,                                         \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};

#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops      = &threshold_ops,
        .default_attrs  = default_attrs,
};

static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
                                               unsigned int bank,
                                               unsigned int block,
                                               u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->threshold_limit      = THRESHOLD_MAX;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   "misc%i", block);
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else {
                ++address;
        }

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}
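
/*
 * allocate_threshold_blocks() walks the same MSR block chain as
 * mce_amd_feature_init() above, but recursively: one threshold_block
 * and kobject per valid block, all announced with KOBJ_ADD uevents
 * once the whole chain has been built.
 */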

static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
        return allocate_threshold_blocks(cpu, bank, 0,
                                         MSR_IA32_MC0_MISC + bank * 4);
}

/* symlinks sibling shared banks to first core. first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        int i, err = 0;
        struct threshold_bank *b = NULL;
        char name[32];

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = cpumask_first(cpu_llc_shared_mask(cpu));

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
                        goto out;

                /* already linked */
                if (per_cpu(threshold_banks, cpu)[bank])
                        goto out;

                b = per_cpu(threshold_banks, i)[bank];

                if (!b)
                        goto out;

                err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;

                goto out;
        }
#endif

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }
        if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
        if (!b->kobj)
                goto out_free;

#ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
#else
        cpumask_set_cpu(cpu, b->cpus);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

        err = local_allocate_threshold_blocks(cpu, bank);
        if (err)
                goto out_free;

        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                per_cpu(threshold_banks, i)[bank] = b;
        }

        goto out;

out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
        free_cpumask_var(b->cpus);
        kfree(b);
out:
        return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        return err;
        }

        return err;
}

/*
 * Be hotplug friendly: on multi-core processors the first core always
 * takes ownership of the shared sysfs dir/files, and the rest of the
 * cores are symlinked to it.
 */

static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct threshold_bank *b;
        char name[32];
        int i = 0;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;
        if (!b->blocks)
                goto free_out;

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;

                return;
        }
#endif

        /* remove all sibling symlinks before unregistering */
        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}

/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}
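
/*
 * Not registered as a CPU notifier directly: the function is published
 * via the threshold_cpu_callback hook below, which the generic MCE code
 * calls from its own CPU hotplug notifier.
 */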

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
device_initcall(threshold_init_device);