// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 * Copyright 2001-2012 IBM Corporation.
 *
 * Please address comments and feedback to Linas Vepstas <[email protected]>
 */

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/debugfs.h>

#include <linux/atomic.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
#include <asm/pte-walk.h>

/** Overview:
 * EEH, or "Enhanced Error Handling", is a PCI bridge technology for
 * dealing with PCI bus errors that can't be dealt with within the
 * usual PCI framework, except by check-stopping the CPU. Systems
 * that are designed for high-availability/reliability cannot afford
 * to crash due to a "mere" PCI error, thus the need for EEH.
 * An EEH-capable bridge operates by converting a detected error
 * into a "slot freeze", taking the PCI adapter off-line and making
 * the slot behave, from the OS's point of view, as if the slot
 * were "empty": all reads return 0xff's and all writes are silently
 * ignored. EEH slot isolation events can be triggered by parity
 * errors on the address or data busses (e.g. during posted writes),
 * which in turn might be caused by low voltage on the bus, dust,
 * vibration, humidity, radioactivity or plain-old failed hardware.
 *
 * Note, however, that one of the leading causes of EEH slot
 * freeze events is buggy device drivers, buggy device microcode,
 * or buggy device hardware. This is because any attempt by the
 * device to bus-master data to a memory address that is not
 * assigned to the device will trigger a slot freeze. (The idea
 * is to prevent devices-gone-wild from corrupting system memory.)
 * Buggy hardware/drivers will have a miserable time co-existing
 * with EEH.
 *
 * Ideally, a PCI device driver, when suspecting that an isolation
 * event has occurred (e.g. by reading 0xff's), will then ask EEH
 * whether this is the case, and then take appropriate steps to
 * reset the PCI slot, the PCI device, and then resume operations.
 * However, until that day, the checking is done here, with the
 * eeh_check_failure() routine embedded in the MMIO macros. If
 * the slot is found to be isolated, an "EEH Event" is synthesized
 * and sent out for processing.
 */

/* If a device driver keeps reading an MMIO register in an interrupt
 * handler after a slot isolation event, it might be stuck in an
 * infinite loop. This sets the threshold for how many read attempts
 * we allow before printing an error message.
 */
#define EEH_MAX_FAILS	2100000

/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)

/*
 * EEH probe mode support, which is part of the flags, exists to
 * support multiple platforms for EEH. Some platforms, like pSeries,
 * do PCI enumeration based on the device tree, while others, like
 * powernv, probe PCI devices from hardware. The flag is used to
 * distinguish between them. In addition, struct eeh_ops::probe is
 * invoked for a particular OF node or PCI device so that the
 * corresponding PE is created there.
 */
int eeh_subsystem_flags;
EXPORT_SYMBOL(eeh_subsystem_flags);

/*
 * The maximum number of times a PE is allowed to freeze. If a
 * particular PE's frozen count in the last hour exceeds this limit,
 * the PE will be forced offline permanently.
 */
u32 eeh_max_freezes = 5;

/*
 * Controls whether a recovery event should be scheduled when an
 * isolated device is discovered. This is only really useful for
 * debugging problems with the EEH core.
 */
bool eeh_debugfs_no_recover;

/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;

/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);
EXPORT_SYMBOL_GPL(confirm_error_lock);

/* Lock to protect passed flags */
static DEFINE_MUTEX(eeh_dev_mutex);

/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
 * can access it.
 */
#define EEH_PCI_REGS_LOG_LEN 8192
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];

/*
 * The struct is used to maintain the EEH global statistics.
 * These statistics are exported to user space through procfs.
 */
struct eeh_stats {
	u64 no_device;		/* PCI device not found		*/
	u64 no_dn;		/* OF node not found		*/
	u64 no_cfg_addr;	/* Config address not found	*/
	u64 ignored_check;	/* EEH check skipped		*/
	u64 total_mmio_ffs;	/* Total EEH checks		*/
	u64 false_positives;	/* Unnecessary EEH checks	*/
	u64 slot_resets;	/* PE reset			*/
};

static struct eeh_stats eeh_stats;

static int __init eeh_setup(char *str)
{
	if (!strcmp(str, "off"))
		eeh_add_flag(EEH_FORCE_DISABLED);
	else if (!strcmp(str, "early_log"))
		eeh_add_flag(EEH_EARLY_DUMP_LOG);

	return 1;
}
__setup("eeh=", eeh_setup);
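/*
 * Example (illustrative, editorial note): the handler above is wired to the
 * "eeh=" kernel command line parameter, so EEH behaviour can be tuned at
 * boot time, e.g.
 *
 *	eeh=off		- force EEH recovery off (EEH_FORCE_DISABLED)
 *	eeh=early_log	- request an early dump of the EEH log
 *			  (EEH_EARLY_DUMP_LOG)
 *
 * Any other value is silently ignored by eeh_setup().
 */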
void eeh_show_enabled(void)
{
	if (eeh_has_flag(EEH_FORCE_DISABLED))
		pr_info("EEH: Recovery disabled by kernel parameter.\n");
	else if (eeh_has_flag(EEH_ENABLED))
		pr_info("EEH: Capable adapter found: recovery enabled.\n");
	else
		pr_info("EEH: No capable adapters found: recovery disabled.\n");
}

/*
 * This routine captures assorted PCI configuration space data
 * for the indicated PCI device, and puts it into a buffer
 * for RTAS error logging.
 */
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
	u32 cfg;
	int cap, i;
	int n = 0, l = 0;
	char buffer[128];

	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
		       edev->pe->phb->global_number, edev->bdfn >> 8,
		       PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
	pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
		edev->pe->phb->global_number, edev->bdfn >> 8,
		PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));

	eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	pr_warn("EEH: PCI device/vendor: %08x\n", cfg);

	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);

	/* Gather bridge-specific registers */
	if (edev->mode & EEH_DEV_BRIDGE) {
		eeh_ops->read_config(edev, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		pr_warn("EEH: Bridge secondary status: %04x\n", cfg);

		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		pr_warn("EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = edev->pcix_cap;
	if (cap) {
		eeh_ops->read_config(edev, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		pr_warn("EEH: PCI-X cmd: %08x\n", cfg);

		eeh_ops->read_config(edev, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		pr_warn("EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump PCI-E cap 10 */
	cap = edev->pcie_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		pr_warn("EEH: PCI-E capabilities and status follow:\n");

		for (i = 0; i <= 8; i++) {
			eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	/* If AER capable, dump it */
	cap = edev->aer_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e AER:\n");
		pr_warn("EEH: PCI-E AER capability register set follows:\n");

		for (i = 0; i <= 13; i++) {
			eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E AER %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	return n;
}

static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag)
{
	struct eeh_dev *edev, *tmp;
	size_t *plen = flag;

	eeh_pe_for_each_dev(pe, edev, tmp)
		*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
					  EEH_PCI_REGS_LOG_LEN - *plen);

	return NULL;
}

/**
 * eeh_slot_error_detail - Generate combined log including driver log and error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 *
 * This routine should be called to generate the combined log, which
 * consists of the driver log and the error log. The driver log is
 * gathered from the config space of the corresponding PCI device, while
 * the error log is fetched through a platform dependent function call.
 */
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
	size_t loglen = 0;

	/*
	 * When the PHB is fenced or dead, it's pointless to collect
	 * the data from PCI config space because it should return
	 * 0xFF's. For ER, we still retrieve the data from the PCI
	 * config space.
	 *
	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
	 * 0xFF's is always returned from PCI config space.
	 *
	 * When the @severity is EEH_LOG_PERM, the PE is going to be
	 * removed. Prior to that, the drivers for devices included in
	 * the PE will be closed. The drivers rely on a working IO path
	 * to bring the devices to a quiet state. Otherwise, PCI traffic
	 * from those devices after they are removed is likely to cause
	 * another unexpected EEH error.
	 */
	if (!(pe->type & EEH_PE_PHB)) {
		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
		    severity == EEH_LOG_PERM)
			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		/*
		 * The config space of some PCI devices can't be accessed
		 * when their PEs are in frozen state. Otherwise, a fenced
		 * PHB might be seen. Those PEs are identified with the flag
		 * EEH_PE_CFG_RESTRICTED, indicating that EEH_PE_CFG_BLOCKED
		 * is set automatically when the PE is put into EEH_PE_ISOLATED.
		 *
		 * Restoring BARs possibly triggers PCI config access in
		 * (OPAL) firmware and then causes a fenced PHB. If the
		 * PCI config is blocked with the flag EEH_PE_CFG_BLOCKED, it's
		 * pointless to restore BARs and dump config space.
		 */
		eeh_ops->configure_bridge(pe);
		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
			eeh_pe_restore_bars(pe);

			pci_regs_buf[0] = 0;
			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
		}
	}

	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}

/**
 * eeh_token_to_phys - Convert EEH address token to phys address
 * @token: I/O token, should be an address in the form 0xA....
 *
 * This routine should be called to convert a virtual I/O address
 * to a physical one.
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	return ppc_find_vmap_phys(token);
}

/*
 * On the PowerNV platform, we might already have a fenced PHB.
 * In that case, it's meaningless to recover the frozen PE. Instead,
 * we have to handle the fenced PHB first.
 */
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
	struct eeh_pe *phb_pe;
	unsigned long flags;
	int ret;

	if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
		return -EPERM;

	/* Find the PHB PE */
	phb_pe = eeh_phb_pe_get(pe->phb);
	if (!phb_pe) {
		pr_warn("%s: Can't find PE for PHB#%x\n",
			__func__, pe->phb->global_number);
		return -EEXIST;
	}

	/* If the PHB has been in problematic state */
	eeh_serialize_lock(&flags);
	if (phb_pe->state & EEH_PE_ISOLATED) {
		ret = 0;
		goto out;
	}

	/* Check PHB state */
	ret = eeh_ops->get_state(phb_pe, NULL);
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
		ret = 0;
		goto out;
	}

	/* Isolate the PHB and send event */
	eeh_pe_mark_isolated(phb_pe);
	eeh_serialize_unlock(flags);

	pr_debug("EEH: PHB#%x failure detected, location: %s\n",
		 phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
	eeh_send_failure_event(phb_pe);
	return 1;
out:
	eeh_serialize_unlock(flags);
	return ret;
}

static inline const char *eeh_driver_name(struct pci_dev *pdev)
{
	if (pdev)
		return dev_driver_string(&pdev->dev);

	return "<null>";
}

/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
 *
 * Check for an EEH failure for the given device node. Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze. This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dev_check_failure(struct eeh_dev *edev)
{
	int ret;
	unsigned long flags;
	struct device_node *dn;
	struct pci_dev *dev;
	struct eeh_pe *pe, *parent_pe;
	int rc = 0;
	const char *location = NULL;

	eeh_stats.total_mmio_ffs++;

	if (!eeh_enabled())
		return 0;

	if (!edev) {
		eeh_stats.no_dn++;
		return 0;
	}
	dev = eeh_dev_to_pci_dev(edev);
	pe = eeh_dev_to_pe(edev);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!pe) {
		eeh_stats.ignored_check++;
		eeh_edev_dbg(edev, "Ignored check\n");
		return 0;
	}

	/*
	 * On the PowerNV platform, we might already have a fenced PHB,
	 * and we need to take care of that first.
	 */
	ret = eeh_phb_check_failure(pe);
	if (ret > 0)
		return ret;

	/*
	 * If the PE isn't owned by us, we shouldn't check the
	 * state. Instead, let the owner handle it if the PE has
	 * been frozen.
	 */
	if (eeh_pe_passed(pe))
		return 0;

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	eeh_serialize_lock(&flags);
	rc = 1;
	if (pe->state & EEH_PE_ISOLATED) {
		pe->check_count++;
		if (pe->check_count == EEH_MAX_FAILS) {
			dn = pci_device_to_OF_node(dev);
			if (dn)
				location = of_get_property(dn, "ibm,loc-code",
						NULL);
			eeh_edev_err(edev, "%d reads ignored for recovering device at location=%s driver=%s\n",
				pe->check_count,
				location ? location : "unknown",
				eeh_driver_name(dev));
			eeh_edev_err(edev, "Might be infinite loop in %s driver\n",
				eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure. This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = eeh_ops->get_state(pe, NULL);

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children.
	 * We will punt under the following conditions: failure to get
	 * the PE's state; EEH not supported or the PE permanently
	 * unavailable; or the PE in a good state.
	 *
	 * On pSeries, after reaching the threshold, get_state might
	 * return EEH_STATE_NOT_SUPPORT. However, it's possible that the
	 * device state remains uncleared if the device is not marked
	 * pci_channel_io_perm_failure. Therefore, consider logging the
	 * event to let device removal happen.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT &&
	     dev->error_state == pci_channel_io_perm_failure) ||
	    eeh_state_active(ret)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/*
	 * It's a corner case that the parent PE has been put into
	 * frozen state as well. We should take care of that first.
	 */
	parent_pe = pe->parent;
	while (parent_pe) {
		/* Hit the ceiling ? */
		if (parent_pe->type & EEH_PE_PHB)
			break;

		/* Frozen parent PE ? */
		ret = eeh_ops->get_state(parent_pe, NULL);
		if (ret > 0 && !eeh_state_active(ret)) {
			pe = parent_pe;
			pr_err("EEH: Failure of PHB#%x-PE#%x will be handled at parent PHB#%x-PE#%x.\n",
			       pe->phb->global_number, pe->addr,
			       pe->phb->global_number, parent_pe->addr);
		}

		/* Next parent level */
		parent_pe = parent_pe->parent;
	}

	eeh_stats.slot_resets++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges.
	 */
	eeh_pe_mark_isolated(pe);
	eeh_serialize_unlock(flags);

	/* Most EEH events are due to device driver bugs. Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened. So print that out.
	 */
	pr_debug("EEH: %s: Frozen PHB#%x-PE#%x detected\n",
		__func__, pe->phb->global_number, pe->addr);
	eeh_send_failure_event(pe);

	return 1;

dn_unlock:
	eeh_serialize_unlock(flags);
	return rc;
}

EXPORT_SYMBOL_GPL(eeh_dev_check_failure);

/**
 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
 * @token: I/O address
 *
 * Check for an EEH failure at the given I/O address. Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze event. This routine
 * will query firmware for the EEH status.
 *
 * Note this routine is safe to call in an interrupt context.
 */
int eeh_check_failure(const volatile void __iomem *token)
{
	unsigned long addr;
	struct eeh_dev *edev;

	/* Finding the phys addr + pci device; this is pretty quick. */
	addr = eeh_token_to_phys((unsigned long __force) token);
	edev = eeh_addr_cache_get_dev(addr);
	if (!edev) {
		eeh_stats.no_device++;
		return 0;
	}

	return eeh_dev_check_failure(edev);
}
EXPORT_SYMBOL(eeh_check_failure);
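/*
 * Example usage (an illustrative sketch only; "base" and "STATUS_REG" are
 * hypothetical): a driver that reads all-ones from MMIO space can ask EEH
 * whether the slot is actually frozen before acting on the value:
 *
 *	u32 val = readl(base + STATUS_REG);
 *
 *	if (val == 0xffffffff && eeh_check_failure(base + STATUS_REG))
 *		return -EIO;	// slot frozen; an EEH event has been queued
 *
 * As noted in the overview above, this check is normally performed on the
 * driver's behalf by eeh_check_failure() embedded in the MMIO macros.
 */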
/**
 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
 * @pe: EEH PE
 * @function: EEH option
 *
 * This routine should be called to reenable frozen MMIO or DMA
 * so that it would work correctly again. It's useful while doing
 * recovery or log collection on the indicated device.
 */
int eeh_pci_enable(struct eeh_pe *pe, int function)
{
	int active_flag, rc;

	/*
	 * pHyp doesn't allow enabling IO or DMA on an unfrozen PE.
	 * Also, it's pointless to enable them on an unfrozen PE. So
	 * we have to check before enabling IO or DMA.
	 */
	switch (function) {
	case EEH_OPT_THAW_MMIO:
		active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
		break;
	case EEH_OPT_THAW_DMA:
		active_flag = EEH_STATE_DMA_ACTIVE;
		break;
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_FREEZE_PE:
		active_flag = 0;
		break;
	default:
		pr_warn("%s: Invalid function %d\n",
			__func__, function);
		return -EINVAL;
	}

	/*
	 * Check if IO or DMA has been enabled before
	 * enabling them.
	 */
	if (active_flag) {
		rc = eeh_ops->get_state(pe, NULL);
		if (rc < 0)
			return rc;

		/* Needn't enable it at all */
		if (rc == EEH_STATE_NOT_SUPPORT)
			return 0;

		/* It's already enabled */
		if (rc & active_flag)
			return 0;
	}

	/* Issue the request */
	rc = eeh_ops->set_option(pe, function);
	if (rc)
		pr_warn("%s: Unexpected state change %d on "
			"PHB#%x-PE#%x, err=%d\n",
			__func__, function, pe->phb->global_number,
			pe->addr, rc);

	/* Check if the request is finished successfully */
	if (active_flag) {
		rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (rc < 0)
			return rc;

		if (rc & active_flag)
			return 0;

		return -EIO;
	}

	return rc;
}

static void eeh_disable_and_save_dev_state(struct eeh_dev *edev,
					   void *userdata)
{
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	/*
	 * The caller should have disabled and saved the
	 * state for the specified device
	 */
	if (!pdev || pdev == dev)
		return;

	/* Ensure we have D0 power state */
	pci_set_power_state(pdev, PCI_D0);

	/* Save device state */
	pci_save_state(pdev);

	/*
	 * Disable device to avoid any DMA traffic and
	 * interrupt from the device
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}

static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	if (!pdev)
		return;

	/* Apply customization from firmware */
	if (eeh_ops->restore_config)
		eeh_ops->restore_config(edev);

	/* The caller should restore state for the specified device */
	if (pdev != dev)
		pci_restore_state(pdev);
}

/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Return value:
 *	0 if success
 */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
	struct eeh_pe *pe = eeh_dev_to_pe(edev);

	if (!pe) {
		pr_err("%s: No PE found on PCI device %s\n",
			__func__, pci_name(dev));
		return -EINVAL;
	}

	switch (state) {
	case pcie_deassert_reset:
		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
		eeh_unfreeze_pe(pe);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
		break;
	case pcie_hot_reset:
		eeh_pe_mark_isolated(pe);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_HOT);
		break;
	case pcie_warm_reset:
		eeh_pe_mark_isolated(pe);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
		break;
	default:
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
		return -EINVAL;
	}

	return 0;
}

/**
 * eeh_set_dev_freset - Check the required reset for the indicated device
 * @edev: EEH device
 * @flag: return value
 *
 * Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect this information for
 * the indicated device and its children so that the group of
 * devices can be reset properly.
 */
static void eeh_set_dev_freset(struct eeh_dev *edev, void *flag)
{
	struct pci_dev *dev;
	unsigned int *freset = (unsigned int *)flag;

	dev = eeh_dev_to_pci_dev(edev);
	if (dev)
		*freset |= dev->needs_freset;
}

static void eeh_pe_refreeze_passed(struct eeh_pe *root)
{
	struct eeh_pe *pe;
	int state;

	eeh_for_each_pe(root, pe) {
		if (eeh_pe_passed(pe)) {
			state = eeh_ops->get_state(pe, NULL);
			if (state &
			   (EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED)) {
				pr_info("EEH: Passed-through PE PHB#%x-PE#%x was thawed by reset, re-freezing for safety.\n",
					pe->phb->global_number, pe->addr);
				eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
			}
		}
	}
}

/**
 * eeh_pe_reset_full - Complete a full reset process on the indicated PE
 * @pe: EEH PE
 * @include_passed: include passed-through devices?
 *
 * This function executes a full reset procedure on a PE, including setting
 * the appropriate flags, performing a fundamental or hot reset, and then
 * deactivating the reset status. It is designed to be used within the EEH
 * subsystem, as opposed to eeh_pe_reset which is exported to drivers and
 * only performs a single operation at a time.
 *
 * This function will attempt to reset a PE three times before failing.
 */
int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed)
{
	int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
	int type = EEH_RESET_HOT;
	unsigned int freset = 0;
	int i, state = 0, ret;

	/*
	 * Determine the type of reset to perform - hot or fundamental.
	 * Hot reset is the default operation, unless any device under the
	 * PE requires a fundamental reset.
	 */
	eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);

	if (freset)
		type = EEH_RESET_FUNDAMENTAL;

	/* Mark the PE as in reset state and block config space accesses */
	eeh_pe_state_mark(pe, reset_state);

	/* Make three attempts at resetting the bus */
	for (i = 0; i < 3; i++) {
		ret = eeh_pe_reset(pe, type, include_passed);
		if (!ret)
			ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE,
					   include_passed);
		if (ret) {
			ret = -EIO;
			pr_warn("EEH: Failure %d resetting PHB#%x-PE#%x (attempt %d)\n",
				state, pe->phb->global_number, pe->addr, i + 1);
			continue;
		}
		if (i)
			pr_warn("EEH: PHB#%x-PE#%x: Successful reset (attempt %d)\n",
				pe->phb->global_number, pe->addr, i + 1);

		/* Wait until the PE is in a functioning state */
		state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (state < 0) {
			pr_warn("EEH: Unrecoverable slot failure on PHB#%x-PE#%x\n",
				pe->phb->global_number, pe->addr);
			ret = -ENOTRECOVERABLE;
			break;
		}
		if (eeh_state_active(state))
			break;
		else
			pr_warn("EEH: PHB#%x-PE#%x: Slot inactive after reset: 0x%x (attempt %d)\n",
				pe->phb->global_number, pe->addr, state, i + 1);
	}

	/* Resetting the PE may have unfrozen child PEs. If those PEs have been
	 * (potentially) passed through to a guest, re-freeze them:
	 */
	if (!include_passed)
		eeh_pe_refreeze_passed(pe);

	eeh_pe_state_clear(pe, reset_state, true);
	return ret;
}

/**
 * eeh_save_bars - Save device bars
 * @edev: PCI device associated EEH device
 *
 * Save the values of the device bars. Unlike the restore
 * routine, this routine is *not* recursive. This is because
 * PCI devices are added individually; but, for the restore,
 * an entire slot is reset at a time.
 */
void eeh_save_bars(struct eeh_dev *edev)
{
	int i;

	if (!edev)
		return;

	for (i = 0; i < 16; i++)
		eeh_ops->read_config(edev, i * 4, 4, &edev->config_space[i]);

	/*
	 * For PCI bridges, including the root port, we need to enable bus
	 * mastering explicitly. Otherwise, they can't fetch IODA table
	 * entries correctly. So we cache the bit in advance so that we
	 * can restore it after reset, whether PHB-scoped or PE-scoped.
	 */
	if (edev->mode & EEH_DEV_BRIDGE)
		edev->config_space[1] |= PCI_COMMAND_MASTER;
}

static int eeh_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	eeh_clear_flag(EEH_ENABLED);
	return NOTIFY_DONE;
}

static struct notifier_block eeh_reboot_nb = {
	.notifier_call = eeh_reboot_notifier,
};

static int eeh_device_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	/*
	 * Note: It's not possible to perform EEH device addition (i.e.
	 * {pseries,pnv}_pcibios_bus_add_device()) here because it depends on
	 * the device's resources, which have not yet been set up.
	 */
	case BUS_NOTIFY_DEL_DEVICE:
		eeh_remove_device(to_pci_dev(dev));
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block eeh_device_nb = {
	.notifier_call = eeh_device_notifier,
};

/**
 * eeh_init - System wide EEH initialization
 * @ops: struct to trace EEH operation callback functions
 *
 * It's the platform's job to call this from an arch_initcall().
 */
int eeh_init(struct eeh_ops *ops)
{
	struct pci_controller *hose, *tmp;
	int ret = 0;

	/* the platform should only initialise EEH once */
	if (WARN_ON(eeh_ops))
		return -EEXIST;
	if (WARN_ON(!ops))
		return -ENOENT;
	eeh_ops = ops;

	/* Register reboot notifier */
	ret = register_reboot_notifier(&eeh_reboot_nb);
	if (ret) {
		pr_warn("%s: Failed to register reboot notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	ret = bus_register_notifier(&pci_bus_type, &eeh_device_nb);
	if (ret) {
		pr_warn("%s: Failed to register bus notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	/* Initialize PHB PEs */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		eeh_phb_pe_create(hose);

	eeh_addr_cache_init();

	/* Initialize EEH event */
	return eeh_event_init();
}

/**
 * eeh_probe_device() - Perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine must be used to complete EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 */
void eeh_probe_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	pr_debug("EEH: Adding device %s\n", pci_name(dev));

	/*
	 * pci_dev_to_eeh_dev() can only work if eeh_probe_dev() was
	 * already called for this device.
	 */
	if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
		pci_dbg(dev, "Already bound to an eeh_dev!\n");
		return;
	}

	edev = eeh_ops->probe(dev);
	if (!edev) {
		pr_debug("EEH: Adding device failed\n");
		return;
	}

	/*
	 * FIXME: We rely on pcibios_release_device() to remove the
	 * existing EEH state. The release function is only called if
	 * the pci_dev's refcount drops to zero so if something is
	 * keeping a ref to a device (e.g. a filesystem) we need to
	 * remove the old EEH state.
	 *
	 * FIXME: HEY MA, LOOK AT ME, NO LOCKING!
	 */
	if (edev->pdev && edev->pdev != dev) {
		eeh_pe_tree_remove(edev);
		eeh_addr_cache_rmv_dev(edev->pdev);
		eeh_sysfs_remove_device(edev->pdev);

		/*
		 * The PCI device should already have been removed,
		 * even though that didn't happen correctly. So we
		 * needn't call into the error handler afterwards.
		 */
		edev->mode |= EEH_DEV_NO_HANDLER;
	}

	/* bind the pdev and the edev together */
	edev->pdev = dev;
	dev->dev.archdata.edev = edev;
	eeh_addr_cache_insert_dev(dev);
	eeh_sysfs_add_device(dev);
}

/**
 * eeh_remove_device - Undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar). It unregisters
 * the PCI device from the EEH subsystem. I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
void eeh_remove_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	if (!dev || !eeh_enabled())
		return;
	edev = pci_dev_to_eeh_dev(dev);

	/* Unregister the device with the EEH/PCI address search system */
	dev_dbg(&dev->dev, "EEH: Removing device\n");

	if (!edev || !edev->pdev || !edev->pe) {
		dev_dbg(&dev->dev, "EEH: Device not referenced!\n");
		return;
	}

	/*
	 * During the hotplug for EEH error recovery, we need the EEH
	 * device attached to the parent PE so that its BARs can be
	 * restored a bit later. So we keep it around for the BAR
	 * restore and remove it from the parent PE during the restore.
	 */
	edev->pdev = NULL;

	/*
	 * eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev() so we need to
	 * remove the sysfs files before clearing dev.archdata.edev
	 */
	if (edev->mode & EEH_DEV_SYSFS)
		eeh_sysfs_remove_device(dev);

	/*
	 * We're removing the device from the PCI subsystem, which means
	 * the PCI device driver doesn't support EEH, or doesn't support
	 * it well. So we rely entirely on hotplug to do recovery for
	 * this specific PCI device.
	 */
	edev->mode |= EEH_DEV_NO_HANDLER;

	eeh_addr_cache_rmv_dev(dev);

	/*
	 * The flag "in_error" is used to trace whether the EEH device
	 * for a VF is in error state. It's set in eeh_report_error(). If
	 * it's not set, eeh_report_{reset,resume}() won't be called
	 * for the VF EEH device.
	 */
	edev->in_error = false;
	dev->dev.archdata.edev = NULL;
	if (!(edev->pe->state & EEH_PE_KEEP))
		eeh_pe_tree_remove(edev);
	else
		edev->mode |= EEH_DEV_DISCONNECTED;
}

int eeh_unfreeze_pe(struct eeh_pe *pe)
{
	int ret;

	ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
	if (ret) {
		pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
			__func__, ret, pe->phb->global_number, pe->addr);
		return ret;
	}

	ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
	if (ret) {
		pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
			__func__, ret, pe->phb->global_number, pe->addr);
		return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_unfreeze_pe);

static struct pci_device_id eeh_reset_ids[] = {
	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE */
	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
	{ PCI_DEVICE(0x14e4, 0x1657) },	/* Broadcom BCM5719 */
	{ 0 }
};

static int eeh_pe_change_owner(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	struct pci_device_id *id;
	int ret;

	/* Check PE state */
	ret = eeh_ops->get_state(pe, NULL);
	if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
		return 0;

	/* Unfrozen PE, nothing to do */
	if (eeh_state_active(ret))
		return 0;

	/* Frozen PE, check if it needs PE level reset */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
			if (id->vendor != PCI_ANY_ID &&
			    id->vendor != pdev->vendor)
				continue;
			if (id->device != PCI_ANY_ID &&
			    id->device != pdev->device)
				continue;
			if (id->subvendor != PCI_ANY_ID &&
			    id->subvendor != pdev->subsystem_vendor)
				continue;
			if (id->subdevice != PCI_ANY_ID &&
			    id->subdevice != pdev->subsystem_device)
				continue;

			return eeh_pe_reset_and_recover(pe);
		}
	}

	ret = eeh_unfreeze_pe(pe);
	if (!ret)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
	return ret;
}

/**
 * eeh_dev_open - Increase count of pass through devices for PE
 * @pdev: PCI device
 *
 * Increase count of passed through devices for the indicated
 * PE. As a result, the EEH errors detected on the PE won't be
 * reported. The PE owner will be responsible for detection
 * and recovery.
 */
int eeh_dev_open(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	int ret = -ENODEV;

	guard(mutex)(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		return ret;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		return ret;

	/*
	 * The PE might have been put into frozen state, but we
	 * didn't detect that yet. The passed through PCI devices
	 * in frozen PE won't work properly. Clear the frozen state
	 * in advance.
	 */
	ret = eeh_pe_change_owner(edev->pe);
	if (ret)
		return ret;

	/* Increase PE's pass through count */
	atomic_inc(&edev->pe->pass_dev_cnt);

	return 0;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);

/**
 * eeh_dev_release - Decrease count of pass through devices for PE
 * @pdev: PCI device
 *
 * Decrease count of pass through devices for the indicated PE. If
 * there is no passed through device in the PE, the EEH errors detected
 * on the PE will be reported and handled as usual.
 */
void eeh_dev_release(struct pci_dev *pdev)
{
	struct eeh_dev *edev;

	guard(mutex)(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		return;

	/* No EEH device ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
		return;

	/* Decrease PE's pass through count */
	WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
	eeh_pe_change_owner(edev->pe);
}
EXPORT_SYMBOL(eeh_dev_release);
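/*
 * Note (editorial, illustrative): eeh_dev_open() and eeh_dev_release() are
 * intended to be used in pairs around device pass-through. A host-side
 * driver handing @pdev to a guest would call eeh_dev_open(pdev) first,
 * making the new owner responsible for error detection and recovery, and
 * eeh_dev_release(pdev) when reclaiming the device, returning error
 * handling to the host as described in the kernel-doc above.
 */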
#ifdef CONFIG_IOMMU_API

/**
 * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE
 * @group: IOMMU group
 *
 * The routine is called to convert an IOMMU group to an EEH PE.
 */
struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
{
	struct pci_dev *pdev = NULL;
	struct eeh_dev *edev;
	int ret;

	/* No IOMMU group ? */
	if (!group)
		return NULL;

	ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
	if (!ret || !pdev)
		return NULL;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		return NULL;

	return edev->pe;
}
EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);

#endif /* CONFIG_IOMMU_API */

/**
 * eeh_pe_set_option - Set options for the indicated PE
 * @pe: EEH PE
 * @option: requested option
 *
 * The routine is called to enable or disable EEH functionality
 * on the indicated PE, or to enable IO or DMA for a frozen PE.
 */
int eeh_pe_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/*
	 * EEH functionality could possibly be disabled; just return
	 * an error in that case. Note that EEH functionality isn't
	 * expected to be disabled on one specific PE.
	 */
	switch (option) {
	case EEH_OPT_ENABLE:
		if (eeh_enabled()) {
			ret = eeh_pe_change_owner(pe);
			break;
		}
		ret = -EIO;
		break;
	case EEH_OPT_DISABLE:
		break;
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
	case EEH_OPT_FREEZE_PE:
		if (!eeh_ops || !eeh_ops->set_option) {
			ret = -ENOENT;
			break;
		}

		ret = eeh_pci_enable(pe, option);
		break;
	default:
		pr_debug("%s: Option %d out of range (%d, %d)\n",
			__func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_set_option);

/**
 * eeh_pe_get_state - Retrieve PE's state
 * @pe: EEH PE
 *
 * Retrieve the PE's state, which covers 3 aspects: enabled
 * DMA, enabled IO and asserted reset.
 */
int eeh_pe_get_state(struct eeh_pe *pe)
{
	int result, ret = 0;
	bool rst_active, dma_en, mmio_en;

	/* Existing PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->get_state)
		return -ENOENT;

	/*
	 * If the parent PE is owned by the host kernel and is undergoing
	 * error recovery, we should return the PE state as temporarily
	 * unavailable so that the error recovery on the guest is suspended
	 * until the recovery completes on the host.
	 */
	if (pe->parent &&
	    !(pe->state & EEH_PE_REMOVED) &&
	    (pe->parent->state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING)))
		return EEH_PE_STATE_UNAVAIL;

	result = eeh_ops->get_state(pe, NULL);
	rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
	dma_en = !!(result & EEH_STATE_DMA_ENABLED);
	mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);

	if (rst_active)
		ret = EEH_PE_STATE_RESET;
	else if (dma_en && mmio_en)
		ret = EEH_PE_STATE_NORMAL;
	else if (!dma_en && !mmio_en)
		ret = EEH_PE_STATE_STOPPED_IO_DMA;
	else if (!dma_en && mmio_en)
		ret = EEH_PE_STATE_STOPPED_DMA;
	else
		ret = EEH_PE_STATE_UNAVAIL;

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_get_state);

static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	int ret = 0;

	eeh_pe_restore_bars(pe);

	/*
	 * Reenable PCI devices as the devices passed
	 * through are always enabled before the reset.
	 */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		ret = pci_reenable_device(pdev);
		if (ret) {
			pr_warn("%s: Failure %d reenabling %s\n",
				__func__, ret, pci_name(pdev));
			return ret;
		}
	}

	/* The PE is still in frozen state */
	if (include_passed || !eeh_pe_passed(pe)) {
		ret = eeh_unfreeze_pe(pe);
	} else
		pr_info("EEH: Note: Leaving passthrough PHB#%x-PE#%x frozen.\n",
			pe->phb->global_number, pe->addr);
	if (!ret)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, include_passed);
	return ret;
}

/**
 * eeh_pe_reset - Issue PE reset according to specified type
 * @pe: EEH PE
 * @option: reset type
 * @include_passed: include passed-through devices?
 *
 * The routine is called to reset the specified PE with the
 * indicated type, either fundamental reset or hot reset.
 * PE reset is the most important part of error recovery.
 */
int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
		return -ENOENT;

	switch (option) {
	case EEH_RESET_DEACTIVATE:
		ret = eeh_ops->reset(pe, option);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, include_passed);
		if (ret)
			break;

		ret = eeh_pe_reenable_devices(pe, include_passed);
		break;
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Proactively freeze the PE to drop all MMIO access
		 * during reset, which should be banned as it always
		 * causes a recursive EEH error.
		 */
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);

		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		ret = eeh_ops->reset(pe, option);
		break;
	default:
		pr_debug("%s: Unsupported option %d\n",
			__func__, option);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_reset);

/**
 * eeh_pe_configure - Configure PCI bridges after PE reset
 * @pe: EEH PE
 *
 * The routine is called to restore the PCI config space for
 * those PCI devices, especially PCI bridges, affected by the PE
 * reset issued previously.
 */
int eeh_pe_configure(struct eeh_pe *pe)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;
	else
		ret = eeh_ops->configure_bridge(pe);

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_configure);

/**
 * eeh_pe_inject_err - Inject the specified PCI error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: error function
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified PCI error, which
 * is determined by @type and @func, to the indicated PE for
 * testing purposes.
 */
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
		      unsigned long addr, unsigned long mask)
{
	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/* Unsupported operation ? */
	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENOENT;

	/* Check on PCI error function */
	if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
		return -EINVAL;

	return eeh_ops->err_inject(pe, type, func, addr, mask);
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);

#ifdef CONFIG_PROC_FS
static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (!eeh_enabled()) {
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
				"no device=%llu\n"
				"no device node=%llu\n"
				"no config address=%llu\n"
				"check not wanted=%llu\n"
				"eeh_total_mmio_ffs=%llu\n"
				"eeh_false_positives=%llu\n"
				"eeh_slot_resets=%llu\n",
				eeh_stats.no_device,
				eeh_stats.no_dn,
				eeh_stats.no_cfg_addr,
				eeh_stats.ignored_check,
				eeh_stats.total_mmio_ffs,
				eeh_stats.false_positives,
				eeh_stats.slot_resets);
	}

	return 0;
}
#endif /* CONFIG_PROC_FS */
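/*
 * Example (illustrative): with CONFIG_PROC_FS enabled, the statistics
 * above can be inspected from userspace once the file is registered in
 * eeh_init_proc() below, e.g.
 *
 *	$ cat /proc/powerpc/eeh
 *	EEH Subsystem is enabled
 *	no device=0
 *	...
 */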
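/*
 * Deliberately trigger an EEH freeze on @pdev by disabling memory space
 * decoding for one of its MMIO BARs and then reading from that BAR. Used
 * by the eeh_dev_break debugfs interface below; the step-by-step process
 * is described in the comment inside the function.
 */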
static int eeh_break_device(struct pci_dev *pdev)
{
	struct resource *bar = NULL;
	void __iomem *mapped;
	u16 old, bit;
	int i, pos;

	/* Do we have an MMIO BAR to disable? */
	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		struct resource *r = &pdev->resource[i];

		if (!r->flags || !r->start)
			continue;
		if (r->flags & IORESOURCE_IO)
			continue;
		if (r->flags & IORESOURCE_UNSET)
			continue;

		bar = r;
		break;
	}

	if (!bar) {
		pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
		return -ENXIO;
	}

	pci_err(pdev, "Going to break: %pR\n", bar);

	if (pdev->is_virtfn) {
#ifndef CONFIG_PCI_IOV
		return -ENXIO;
#else
		/*
		 * VFs don't have a per-function COMMAND register, so the best
		 * we can do is clear the Memory Space Enable bit in the PF's
		 * SRIOV control reg.
		 *
		 * Unfortunately, this requires that we have a PF (i.e. doesn't
		 * work for a passed-through VF) and it has the potential side
		 * effect of also causing an EEH on every other VF under the
		 * PF. Oh well.
		 */
		pdev = pdev->physfn;
		if (!pdev)
			return -ENXIO; /* passed through VFs have no PF */

		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
		pos += PCI_SRIOV_CTRL;
		bit = PCI_SRIOV_CTRL_MSE;
#endif /* !CONFIG_PCI_IOV */
	} else {
		bit = PCI_COMMAND_MEMORY;
		pos = PCI_COMMAND;
	}

	/*
	 * Process here is:
	 *
	 * 1. Disable Memory space.
	 *
	 * 2. Perform an MMIO to the device. This should result in an error
	 *    (CA / UR) being raised by the device which results in an EEH
	 *    PE freeze. Using the in_8() accessor skips the EEH detection
	 *    hook, so the EEH detection machinery won't be triggered here.
	 *    This is to match the usual behaviour of EEH where the HW will
	 *    asynchronously freeze a PE and it's up to the kernel to notice
	 *    and deal with it.
	 *
	 * 3. Turn Memory space back on. This is more important for VFs
	 *    since recovery will probably fail if we don't. For normal
	 *    devices the COMMAND register is reset as a part of
	 *    re-initialising the device.
	 *
	 * Breaking stuff is the point so who cares if it's racy ;)
	 */
	pci_read_config_word(pdev, pos, &old);

	mapped = ioremap(bar->start, PAGE_SIZE);
	if (!mapped) {
		pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
		return -ENXIO;
	}

	pci_write_config_word(pdev, pos, old & ~bit);
	in_8(mapped);
	pci_write_config_word(pdev, pos, old);

	iounmap(mapped);

	return 0;
}

int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
{
	return eeh_break_device(pdev);
}

#ifdef CONFIG_DEBUG_FS

static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp,
					     const char __user *user_buf,
					     size_t count, loff_t *ppos)
{
	uint32_t domain, bus, dev, fn;
	struct pci_dev *pdev;
	char buf[20];
	int ret;

	memset(buf, 0, sizeof(buf));
	ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
	if (!ret)
		return ERR_PTR(-EFAULT);

	ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
	if (ret != 4) {
		pr_err("%s: expected 4 args, got %d\n", __func__, ret);
		return ERR_PTR(-EINVAL);
	}

	pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	return pdev;
}

static int eeh_enable_dbgfs_set(void *data, u64 val)
{
	if (val)
		eeh_clear_flag(EEH_FORCE_DISABLED);
	else
		eeh_add_flag(EEH_FORCE_DISABLED);

	return 0;
}

static int eeh_enable_dbgfs_get(void *data, u64 *val)
{
	if (eeh_enabled())
		*val = 0x1ul;
	else
		*val = 0x0ul;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
			 eeh_enable_dbgfs_set, "0x%llx\n");

static ssize_t eeh_force_recover_write(struct file *filp,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct pci_controller *hose;
	uint32_t phbid, pe_no;
	struct eeh_pe *pe;
	char buf[20];
	int ret;

	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	/*
	 * When the PE is NULL the event is a "special" event. Rather than
	 * recovering a specific PE it forces the EEH core to scan for failed
	 * PHBs and recovers each. This needs to be done before any device
	 * recoveries can occur.
	 */
	if (!strncmp(buf, "hwcheck", 7)) {
		__eeh_send_failure_event(NULL);
		return count;
	}

	ret = sscanf(buf, "%x:%x", &phbid, &pe_no);
	if (ret != 2)
		return -EINVAL;

	hose = pci_find_controller_for_domain(phbid);
	if (!hose)
		return -ENODEV;

	/* Retrieve PE */
	pe = eeh_pe_get(hose, pe_no);
	if (!pe)
		return -ENODEV;

	/*
	 * We don't do any state checking here since the detection
	 * process is async to the recovery process. The recovery
	 * thread *should* not break even if we schedule a recovery
	 * from an odd state (e.g. PE removed, or recovery of a
	 * non-isolated PE).
	 */
	__eeh_send_failure_event(pe);

	return ret < 0 ? ret : count;
}

static const struct file_operations eeh_force_recover_fops = {
	.open	= simple_open,
	.write	= eeh_force_recover_write,
};
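/*
 * Example (illustrative; assumes debugfs is mounted at /sys/kernel/debug
 * and that arch_debugfs_dir is the "powerpc" directory there): recovery
 * can be forced from userspace with e.g.
 *
 *	# echo hwcheck > /sys/kernel/debug/powerpc/eeh_force_recover
 *	# echo 0:1 > /sys/kernel/debug/powerpc/eeh_force_recover
 *
 * where "hwcheck" scans for failed PHBs as described above, and
 * "<phbid>:<pe_no>" (both hex) schedules recovery of a specific PE.
 */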
static ssize_t eeh_debugfs_dev_usage(struct file *filp,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	static const char usage[] = "input format: <domain>:<bus>:<dev>.<fn>\n";

	return simple_read_from_buffer(user_buf, count, ppos,
				       usage, sizeof(usage) - 1);
}

static ssize_t eeh_dev_check_write(struct file *filp,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct pci_dev *pdev;
	struct eeh_dev *edev;
	int ret;

	pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev) {
		pci_err(pdev, "No eeh_dev for this device!\n");
		pci_dev_put(pdev);
		return -ENODEV;
	}

	ret = eeh_dev_check_failure(edev);
	pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n",
			pci_name(pdev), ret);

	pci_dev_put(pdev);

	return count;
}

static const struct file_operations eeh_dev_check_fops = {
	.open	= simple_open,
	.write	= eeh_dev_check_write,
	.read	= eeh_debugfs_dev_usage,
};
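/*
 * Example (illustrative): trigger an EEH check of a device by PCI address,
 * in the format reported by eeh_debugfs_dev_usage() above, e.g.
 *
 *	# echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_check
 */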
static ssize_t eeh_dev_break_write(struct file *filp,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct pci_dev *pdev;
	int ret;

	pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	ret = eeh_break_device(pdev);
	pci_dev_put(pdev);

	if (ret < 0)
		return ret;

	return count;
}

static const struct file_operations eeh_dev_break_fops = {
	.open	= simple_open,
	.write	= eeh_dev_break_write,
	.read	= eeh_debugfs_dev_usage,
};
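/*
 * Example (illustrative): deliberately freeze the PE of a device via
 * eeh_break_device(), e.g. to exercise a driver's error handlers:
 *
 *	# echo 0000:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_break
 */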
static ssize_t eeh_dev_can_recover(struct file *filp,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct pci_driver *drv;
	struct pci_dev *pdev;
	size_t ret;

	pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/*
	 * In order for error recovery to work the driver needs to implement
	 * .error_detected(), so it can quiesce IO to the device, and
	 * .slot_reset() so it can re-initialise the device after a reset.
	 *
	 * Ideally they'd implement .resume() too, but some drivers which
	 * we need to support (notably IPR) don't, so I guess we can tolerate
	 * that.
	 *
	 * .mmio_enabled() is mostly there as a work-around for devices which
	 * take forever to re-init after a hot reset. Implementing that is
	 * strictly optional.
	 */
	drv = pci_dev_driver(pdev);
	if (drv &&
	    drv->err_handler &&
	    drv->err_handler->error_detected &&
	    drv->err_handler->slot_reset) {
		ret = count;
	} else {
		ret = -EOPNOTSUPP;
	}

	pci_dev_put(pdev);

	return ret;
}

static const struct file_operations eeh_dev_can_recover_fops = {
	.open	= simple_open,
	.write	= eeh_dev_can_recover,
	.read	= eeh_debugfs_dev_usage,
};

#endif

static int __init eeh_init_proc(void)
{
	if (machine_is(pseries) || machine_is(powernv)) {
		proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
#ifdef CONFIG_DEBUG_FS
		debugfs_create_file_unsafe("eeh_enable", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_enable_dbgfs_ops);
		debugfs_create_u32("eeh_max_freezes", 0600,
				   arch_debugfs_dir, &eeh_max_freezes);
		debugfs_create_bool("eeh_disable_recovery", 0600,
				    arch_debugfs_dir,
				    &eeh_debugfs_no_recover);
		debugfs_create_file_unsafe("eeh_dev_check", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_dev_check_fops);
		debugfs_create_file_unsafe("eeh_dev_break", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_dev_break_fops);
		debugfs_create_file_unsafe("eeh_force_recover", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_force_recover_fops);
		debugfs_create_file_unsafe("eeh_dev_can_recover", 0600,
					   arch_debugfs_dir, NULL,
					   &eeh_dev_can_recover_fops);
		eeh_cache_debugfs_init();
#endif
	}

	return 0;
}
__initcall(eeh_init_proc);