GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/sysdev/fsl_pci.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MPC83xx/85xx/86xx PCI/PCIE support routines.
 *
 * Copyright 2007-2012 Freescale Semiconductor, Inc.
 * Copyright 2008-2009 MontaVista Software, Inc.
 *
 * Initial author: Xianghua Xiao <[email protected]>
 * Recode: ZHANG WEI <[email protected]>
 * Rewrite the routing for Freescale PCI and PCI Express
 *	Roy Zang <[email protected]>
 * MPC83xx PCI-Express support:
 *	Tony Li <[email protected]>
 *	Anton Vorontsov <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/fsl/edac.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/mpc85xx.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/swiotlb.h>
#include <asm/setup.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

static int fsl_pcie_bus_fixup, is_mpc83xx_pci;

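/*
 * Early fixup for Freescale PCIe controllers: when the controller is in
 * host (root complex) mode, force its class code to PCI-to-PCI bridge and
 * remember that the bus-level resource fixup below is needed.
 */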
static void quirk_fsl_pcie_early(struct pci_dev *dev)
{
	u8 hdr_type;

	/* if we aren't a PCIe don't bother */
	if (!pci_is_pcie(dev))
		return;

	/* if we aren't in host mode don't bother */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
	if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
		return;

	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
	fsl_pcie_bus_fixup = 1;
	return;
}

static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
				    int, int, u32 *);

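/*
 * Return 0 if the PCIe link has trained up to L0, non-zero otherwise.
 * Controllers older than IP rev 3.0 expose the LTSSM state through the
 * PCIE_LTSSM config register; newer ones report it in the memory-mapped
 * CSR0 register.
 */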
static int fsl_pcie_check_link(struct pci_controller *hose)
{
	u32 val = 0;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
		if (hose->ops->read == fsl_indirect_read_config)
			__indirect_read_config(hose, hose->first_busno, 0,
					       PCIE_LTSSM, 4, &val);
		else
			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
		if (val < PCIE_LTSSM_L0)
			return 1;
	} else {
		struct ccsr_pci __iomem *pci = hose->private_data;
		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
				>> PEX_CSR0_LTSSM_SHIFT;
		if (val != PEX_CSR0_LTSSM_L0)
			return 1;
	}

	return 0;
}

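/*
 * Config-space read that refreshes the NO_PCIE_LINK flag from the current
 * link state before handing off to the generic indirect access code, so
 * accesses to a link that has since dropped can be failed cleanly instead
 * of being attempted.
 */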
static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	else
		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return indirect_read_config(bus, devfn, offset, len, val);
}

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)

static struct pci_ops fsl_indirect_pcie_ops =
{
	.read = fsl_indirect_read_config,
	.write = indirect_write_config,
};

static u64 pci64_dma_offset;

#ifdef CONFIG_SWIOTLB
static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);

	pdev->dev.bus_dma_limit =
		hose->dma_window_base_cur + hose->dma_window_size - 1;
}

static void setup_swiotlb_ops(struct pci_controller *hose)
{
	if (ppc_swiotlb_enable)
		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
}
#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
#endif

static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
	/*
	 * Fix up PCI devices that are able to DMA to the large inbound
	 * mapping that allows addressing any RAM address from across PCI.
	 */
	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
		dev->bus_dma_limit = 0;
		dev->archdata.dma_offset = pci64_dma_offset;
	}
}

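/*
 * Program outbound ATMU window(s) for one memory resource, starting at
 * window 'index'.  The range is split into naturally aligned power-of-two
 * chunks, as each window must be.  Returns the number of windows consumed,
 * or -1 if more than the four usable outbound windows would be needed.
 */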
static int setup_one_atmu(struct ccsr_pci __iomem *pci,
				unsigned int index, const struct resource *res,
				resource_size_t offset)
{
	resource_size_t pci_addr = res->start - offset;
	resource_size_t phys_addr = res->start;
	resource_size_t size = resource_size(res);
	u32 flags = 0x80044000; /* enable & mem R/W */
	unsigned int i;

	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
		(u64)res->start, (u64)size);

	if (res->flags & IORESOURCE_PREFETCH)
		flags |= 0x10000000; /* enable relaxed ordering */

	for (i = 0; size > 0; i++) {
		unsigned int bits = min_t(u32, ilog2(size),
					__ffs(pci_addr | phys_addr));

		if (index + i >= 5)
			return -1;

		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));

		pci_addr += (resource_size_t)1U << bits;
		phys_addr += (resource_size_t)1U << bits;
		size -= (resource_size_t)1U << bits;
	}

	return i;
}

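/*
 * Detect a kdump kernel by the "linux,usable-memory" property that kexec
 * adds to the /memory node.
 */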
static bool is_kdump(void)
{
	struct device_node *node;
	bool ret;

	node = of_find_node_by_type(NULL, "memory");
	if (!node) {
		WARN_ON_ONCE(1);
		return false;
	}

	ret = of_property_read_bool(node, "linux,usable-memory");
	of_node_put(node);

	return ret;
}

/* atmu setup for fsl pci/pcie controller */
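/*
 * Outbound windows are carved from the hose's memory and I/O resources;
 * PEXCSRBAR is then placed just below the lowest outbound PCI address (or
 * just below 4 GiB).  Inbound windows are sized to cover RAM, with an
 * optional second window at a PCI address equal to the power-of-two-rounded
 * RAM size so 64-bit capable devices can reach all of memory.  If RAM still
 * exceeds what the windows cover, swiotlb bounce buffering is enabled.
 */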
static void setup_pci_atmu(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
	u64 mem, sz, paddr_hi = 0;
	u64 offset = 0, paddr_lo = ULLONG_MAX;
	u32 pcicsrbar = 0, pcicsrbar_sz;
	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
		PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
	const u64 *reg;
	int len;
	bool setup_inbound;

	/*
	 * If this is kdump, we don't want to trigger a bunch of PCI
	 * errors by closing the window on in-flight DMA.
	 *
	 * We still run most of the function's logic so that things like
	 * hose->dma_window_size still get set.
	 */
	setup_inbound = !is_kdump();

	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
		/*
		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
		 * windows have implemented the default target value as 0xf
		 * for CCSR space. In all Freescale legacy devices the target
		 * of 0xf is reserved for local memory space. 9132 Rev1.0
		 * now has local memory space mapped to target 0x0 instead of
		 * 0xf. Hence adding a workaround to remove the target 0xf
		 * defined for memory space from Inbound window attributes.
		 */
		piwar &= ~PIWAR_TGI_LOCAL;
	}

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
			win_idx = 2;
			start_idx = 0;
			end_idx = 3;
		}
	}

	/* Disable all windows (except powar0 since it's ignored) */
	for (i = 1; i < 5; i++)
		out_be32(&pci->pow[i].powar, 0);

	if (setup_inbound) {
		for (i = start_idx; i < end_idx; i++)
			out_be32(&pci->piw[i].piwar, 0);
	}

	/* Setup outbound MEM window */
	for (i = 0, j = 1; i < 3; i++) {
		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
			continue;

		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);

		/* We assume all memory resources have the same offset */
		offset = hose->mem_offset[i];
		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);

		if (n < 0 || j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
		} else
			j += n;
	}

	/* Setup outbound IO window */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		if (j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
		} else {
			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
				 "phy base 0x%016llx.\n",
				(u64)hose->io_resource.start,
				(u64)resource_size(&hose->io_resource),
				(u64)hose->io_base_phys);
			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
			out_be32(&pci->pow[j].potear, 0);
			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
			/* Enable, IO R/W */
			out_be32(&pci->pow[j].powar, 0x80088000
				| (ilog2(hose->io_resource.end
				- hose->io_resource.start + 1) - 1));
		}
	}

	/* convert to pci address space */
	paddr_hi -= offset;
	paddr_lo -= offset;

	if (paddr_hi == paddr_lo) {
		pr_err("%pOF: No outbound window space\n", hose->dn);
		return;
	}

	if (paddr_lo == 0) {
		pr_err("%pOF: No space for inbound window\n", hose->dn);
		return;
	}

	/* setup PCSRBAR/PEXCSRBAR */
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
	pcicsrbar_sz = ~pcicsrbar_sz + 1;

	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
		(paddr_lo > 0x100000000ull))
		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
	else
		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);

	paddr_lo = min(paddr_lo, (u64)pcicsrbar);

	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);

	/* Setup inbound mem window */
	mem = memblock_end_of_DRAM();
	pr_info("%s: end of DRAM %llx\n", __func__, mem);

	/*
	 * The msi-address-64 property, if it exists, indicates the physical
	 * address of the MSIIR register.  Normally, this register is located
	 * inside CCSR, so the ATMU that covers all of CCSR is used.  But if
	 * this property exists, then we normally need to create a new ATMU
	 * for it.  For now, however, we cheat.  The only entity that creates
	 * this property is the Freescale hypervisor, and the address is
	 * specified in the partition configuration.  Typically, the address
	 * is located in the page immediately after the end of DDR.  If so, we
	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
	 * page.
	 */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64))) {
		u64 address = be64_to_cpup(reg);

		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
			mem += PAGE_SIZE;
		} else {
			/* TODO: Create a new ATMU for MSIIR */
			pr_warn("%pOF: msi-address-64 address of %llx is "
				"unsupported\n", hose->dn, address);
		}
	}

	sz = min(mem, paddr_lo);
	mem_log = ilog2(sz);

	/* PCIe can overmap inbound & outbound since RX & TX are separated */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* Size window to exact size if power-of-two or one size up */
		if ((1ull << mem_log) != mem) {
			mem_log++;
			if ((1ull << mem_log) > mem)
				pr_info("%pOF: Setting PCI inbound window "
					"greater than memory size\n", hose->dn);
		}

		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwar,  piwar);
		}

		win_idx--;
		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)sz;

		/*
		 * if we have >4G of memory setup second PCI inbound window to
		 * let devices that are 64-bit address capable to work w/o
		 * SWIOTLB and access the full range of memory
		 */
		if (sz != mem) {
			mem_log = ilog2(mem);

			/* Size window up if we don't fit in exact power-of-2 */
			if ((1ull << mem_log) != mem)
				mem_log++;

			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
			pci64_dma_offset = 1ULL << mem_log;

			if (setup_inbound) {
				/* Setup inbound memory window */
				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
				out_be32(&pci->piw[win_idx].piwbear,
						pci64_dma_offset >> 44);
				out_be32(&pci->piw[win_idx].piwbar,
						pci64_dma_offset >> 12);
				out_be32(&pci->piw[win_idx].piwar,  piwar);
			}

			/*
			 * install our own dma_set_mask handler to fixup dma_ops
			 * and dma_offset
			 */
			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;

			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
		}
	} else {
		u64 paddr = 0;

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwar,
				 (piwar | (mem_log - 1)));
		}

		win_idx--;
		paddr += 1ull << mem_log;
		sz -= 1ull << mem_log;

		if (sz) {
			mem_log = ilog2(sz);
			piwar |= (mem_log - 1);

			if (setup_inbound) {
				out_be32(&pci->piw[win_idx].pitar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwbar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			win_idx--;
			paddr += 1ull << mem_log;
		}

		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)paddr;
	}

	if (hose->dma_window_size < mem) {
#ifdef CONFIG_SWIOTLB
		ppc_swiotlb_enable = 1;
#else
		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
			hose->dn);
#endif
		/* adjusting outbound windows could reclaim space in mem map */
		if (paddr_hi < 0xffffffffull)
			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
				"gaps in memory map. Adjusting the memory map "
				"could reduce unnecessary bounce buffering.\n",
				hose->dn);

		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
			(u64)hose->dma_window_size);
	}
}

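/*
 * Enable I/O, memory, bus mastering and SERR on the host bridge, then set
 * either the PCI-X command register (when a PCI-X capability is present)
 * or a default latency timer.
 */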
static void setup_pci_cmd(struct pci_controller *hose)
{
	u16 cmd;
	int cap_x;

	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
		| PCI_COMMAND_IO;
	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);

	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
	if (cap_x) {
		int pci_x_cmd = cap_x + PCI_X_CMD;
		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
	} else {
		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
	}
}

void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	int i, is_pcie = 0, no_link;

	/* The root complex bridge comes up with bogus resources,
	 * we copy the PHB ones in.
	 *
	 * With the current generic PCI code, the PHB bus no longer
	 * has bus->resource[0..4] set, so things are a bit more
	 * tricky.
	 */

	if (fsl_pcie_bus_fixup)
		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);

	if (bus->parent == hose->bus && (is_pcie || no_link)) {
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
			struct resource *res = bus->resource[i];
			struct resource *par;

			if (!res)
				continue;
			if (i == 0)
				par = &hose->io_resource;
			else if (i < 4)
				par = &hose->mem_resources[i-1];
			else
				par = NULL;

			res->start = par ? par->start : 0;
			res->end   = par ? par->end   : 0;
			res->flags = par ? par->flags : 0;
		}
	}
}

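/*
 * Probe one PCI/PCIe host bridge: map its CCSR register block, set up
 * indirect config-space access, bail out if the controller is not in host
 * mode, apply per-SoC errata fixes, parse "ranges", and program the ATMU
 * windows.
 */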
static int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	u8 hdr_type, progif;
	u32 class_code;
	struct device_node *dev;
	struct ccsr_pci __iomem *pci;
	u16 temp;
	u32 svr = mfspr(SPRN_SVR);

	dev = pdev->dev.of_node;

	if (!of_device_is_available(dev)) {
		pr_warn("%pOF: disabled\n", dev);
		return -ENODEV;
	}

	pr_debug("Adding PCI host bridge %pOF\n", dev);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc)) {
		printk(KERN_WARNING "Can't get pci register base!\n");
		return -ENOMEM;
	}

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int))
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
			" bus 0\n", dev);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	/* set platform device as the parent */
	hose->parent = &pdev->dev;
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
		 (u64)rsrc.start, (u64)resource_size(&rsrc));

	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
	if (!hose->private_data)
		goto no_bridge;

	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
			   PPC_INDIRECT_TYPE_BIG_ENDIAN);

	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* use fsl_indirect_read_config for PCIe */
		hose->ops = &fsl_indirect_pcie_ops;
		/* For PCIE read HEADER_TYPE to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
		if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
			goto no_bridge;

	} else {
		/* For PCI read PROG to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
		if ((progif & 1) &&
		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
			goto no_bridge;
	}

	setup_pci_cmd(hose);

	/* check PCI express link status */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
		if (fsl_pcie_check_link(hose))
			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
		/* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
		if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
			early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
			class_code &= 0xff;
			class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
			early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
		}
	} else {
		/*
		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
		 * disable the combining of crossing cacheline
		 * boundary requests into one burst transaction.
		 * PCI-X operation is not affected.
		 * Fix erratum PCI 5 on MPC8548
		 */
#define PCI_BUS_FUNCTION 0x44
#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
		if (((SVR_SOC_VER(svr) == SVR_8543) ||
		     (SVR_SOC_VER(svr) == SVR_8545) ||
		     (SVR_SOC_VER(svr) == SVR_8547) ||
		     (SVR_SOC_VER(svr) == SVR_8548)) &&
		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
			early_read_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, &temp);
			temp |= PCI_BUS_FUNCTION_MDS;
			early_write_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, temp);
		}
	}

	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
		"Firmware bus number: %d->%d\n",
		(unsigned long long)rsrc.start, hose->first_busno,
		hose->last_busno);

	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
		hose, hose->cfg_addr, hose->cfg_data);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, is_primary);

	/* Setup PEX window registers */
	setup_pci_atmu(hose);

	/* Set up controller operations */
	setup_swiotlb_ops(hose);

	return 0;

no_bridge:
	iounmap(hose->private_data);
	/* unmap cfg_data & cfg_addr separately if not on same page */
	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
		iounmap(hose->cfg_data);
	iounmap(hose->cfg_addr);
	pcibios_free_controller(hose);
	return -ENODEV;
}
#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */

DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
			quirk_fsl_pcie_early);

#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
struct mpc83xx_pcie_priv {
	void __iomem *cfg_type0;
	void __iomem *cfg_type1;
	u32 dev_base;
};

struct pex_inbound_window {
	u32 ar;
	u32 tar;
	u32 barl;
	u32 barh;
};

/*
 * With the convention of u-boot, the PCIE outbound window 0 serves
 * as configuration transactions outbound.
 */
#define PEX_OUTWIN0_BAR		0xCA4
#define PEX_OUTWIN0_TAL		0xCA8
#define PEX_OUTWIN0_TAH		0xCAC
#define PEX_RC_INWIN_BASE	0xE60
#define PEX_RCIWARn_EN		0x1

static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Workaround for the HW bug: for Type 0 configuration transactions
	 * the PCI-E controller does not check the device number bits and
	 * just assumes that the device number bits are 0.
	 */
	if (bus->number == hose->first_busno ||
			bus->primary == hose->first_busno) {
		if (devfn & 0xf8)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (ppc_md.pci_exclude_device) {
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	return PCIBIOS_SUCCESSFUL;
}

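/*
 * map_bus hook: return the virtual address for a config access.  Accesses
 * to the root bus use the type-0 window directly; everything else goes
 * through the type-1 window, which is retargeted (via outbound window 0)
 * only when the cached bus/devfn changes.
 */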
static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
					    unsigned int devfn, int offset)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
	u32 dev_base = bus->number << 24 | devfn << 16;
	int ret;

	ret = mpc83xx_pcie_exclude_device(bus, devfn);
	if (ret)
		return NULL;

	offset &= 0xfff;

	/* Type 0 */
	if (bus->number == hose->first_busno)
		return pcie->cfg_type0 + offset;

	if (pcie->dev_base == dev_base)
		goto mapped;

	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);

	pcie->dev_base = dev_base;
mapped:
	return pcie->cfg_type1 + offset;
}

static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
		val &= 0xffffff00;

	return pci_generic_config_write(bus, devfn, offset, len, val);
}

static struct pci_ops mpc83xx_pcie_ops = {
	.map_bus = mpc83xx_pcie_remap_cfg,
	.read = pci_generic_config_read,
	.write = mpc83xx_pcie_write_config,
};

static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
				     struct resource *reg)
{
	struct mpc83xx_pcie_priv *pcie;
	u32 cfg_bar;
	int ret = -ENOMEM;

	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return ret;

	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
	if (!pcie->cfg_type0)
		goto err0;

	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
	if (!cfg_bar) {
		/* PCI-E isn't configured. */
		ret = -ENODEV;
		goto err1;
	}

	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
	if (!pcie->cfg_type1)
		goto err1;

	WARN_ON(hose->dn->data);
	hose->dn->data = pcie;
	hose->ops = &mpc83xx_pcie_ops;
	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return 0;
err1:
	iounmap(pcie->cfg_type0);
err0:
	kfree(pcie);
	return ret;
}

int __init mpc83xx_add_bridge(struct device_node *dev)
{
	int ret;
	int len;
	struct pci_controller *hose;
	struct resource rsrc_reg;
	struct resource rsrc_cfg;
	const int *bus_range;
	int primary;

	is_mpc83xx_pci = 1;

	if (!of_device_is_available(dev)) {
		pr_warn("%pOF: disabled by the firmware.\n", dev);
		return -ENODEV;
	}
	pr_debug("Adding PCI host bridge %pOF\n", dev);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
		printk(KERN_WARNING "Can't get pci register base!\n");
		return -ENOMEM;
	}

	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));

	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
		printk(KERN_WARNING
			"No pci config register base in dev tree, "
			"using default\n");
		/*
		 * MPC83xx supports up to two host controllers
		 *	one at 0x8500 has config space registers at 0x8300
		 *	one at 0x8600 has config space registers at 0x8380
		 */
		if ((rsrc_reg.start & 0xfffff) == 0x8500)
			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
	}
	/*
	 * Controller at offset 0x8500 is primary
	 */
	if ((rsrc_reg.start & 0xfffff) == 0x8500)
		primary = 1;
	else
		primary = 0;

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
		       " bus 0\n", dev);
	}

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	hose->first_busno = bus_range ? bus_range[0] : 0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
		if (ret)
			goto err0;
	} else {
		setup_indirect_pci(hose, rsrc_cfg.start,
				   rsrc_cfg.start + 4, 0);
	}

	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
	       "Firmware bus number: %d->%d\n",
	       (unsigned long long)rsrc_reg.start, hose->first_busno,
	       hose->last_busno);

	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
		 hose, hose->cfg_addr, hose->cfg_data);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, primary);

	return 0;
err0:
	pcibios_free_controller(hose);
	return ret;
}
#endif /* CONFIG_PPC_83xx */

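/*
 * Return the PCI bus address through which the SoC's internal registers
 * (IMMR/CCSR) are visible to PCI masters, or 0 if it cannot be determined.
 */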
u64 fsl_pci_immrbar_base(struct pci_controller *hose)
{
#ifdef CONFIG_PPC_83xx
	if (is_mpc83xx_pci) {
		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
		struct pex_inbound_window *in;
		int i;

		/* Walk the Root Complex Inbound windows to match IMMR base */
		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
		for (i = 0; i < 4; i++) {
			/* not enabled, skip */
			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
				continue;

			if (get_immrbase() == in_le32(&in[i].tar))
				return (u64)in_le32(&in[i].barh) << 32 |
					    in_le32(&in[i].barl);
		}

		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
	}
#endif

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
	if (!is_mpc83xx_pci) {
		u32 base;

		pci_bus_read_config_dword(hose->bus,
			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);

		/*
		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
		 * address type. So when getting base address, these
		 * bits should be masked
		 */
		base &= PCI_BASE_ADDRESS_MEM_MASK;

		return base;
	}
#endif

	return 0;
}

#ifdef CONFIG_PPC_E500
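/*
 * Emulate a load that machine-checked: place an all-ones value of the
 * access width in the destination GPR (and update the base register for
 * the update forms), mimicking the all-ones data a PCI master abort
 * normally returns.  Returns 1 if the instruction was recognised and
 * fixed up, 0 otherwise.
 */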
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
{
	unsigned int rd, ra, rb, d;

	rd = get_rt(inst);
	ra = get_ra(inst);
	rb = get_rb(inst);
	d = get_d(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
		case OP_31_XOP_LWBRX:
			regs->gpr[rd] = 0xffffffff;
			break;

		case OP_31_XOP_LWZUX:
			regs->gpr[rd] = 0xffffffff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LBZX:
			regs->gpr[rd] = 0xff;
			break;

		case OP_31_XOP_LBZUX:
			regs->gpr[rd] = 0xff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LHZX:
		case OP_31_XOP_LHBRX:
			regs->gpr[rd] = 0xffff;
			break;

		case OP_31_XOP_LHZUX:
			regs->gpr[rd] = 0xffff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LHAX:
			regs->gpr[rd] = ~0UL;
			break;

		case OP_31_XOP_LHAUX:
			regs->gpr[rd] = ~0UL;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		default:
			return 0;
		}
		break;

	case OP_LWZ:
		regs->gpr[rd] = 0xffffffff;
		break;

	case OP_LWZU:
		regs->gpr[rd] = 0xffffffff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LBZ:
		regs->gpr[rd] = 0xff;
		break;

	case OP_LBZU:
		regs->gpr[rd] = 0xff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LHZ:
		regs->gpr[rd] = 0xffff;
		break;

	case OP_LHZU:
		regs->gpr[rd] = 0xffff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LHA:
		regs->gpr[rd] = ~0UL;
		break;

	case OP_LHAU:
		regs->gpr[rd] = ~0UL;
		regs->gpr[ra] += (s16)d;
		break;

	default:
		return 0;
	}

	return 1;
}

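/* Return 1 if addr lies inside any FSL PCIe controller's outbound memory window. */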
static int is_in_pci_mem_space(phys_addr_t addr)
{
	struct pci_controller *hose;
	struct resource *res;
	int i;

	list_for_each_entry(hose, &hose_list, list_node) {
		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
			continue;

		for (i = 0; i < 3; i++) {
			res = &hose->mem_resources[i];
			if ((res->flags & IORESOURCE_MEM) &&
				addr >= res->start && addr <= res->end)
				return 1;
		}
	}
	return 0;
}

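/*
 * Machine-check hook: if the faulting address (from MCAR/MCARU) falls in
 * PCIe memory space, decode the load at regs->nip, fake its result via
 * mcheck_handle_load() and skip the instruction.  Returns 1 if the
 * exception was handled this way.
 */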
int fsl_pci_mcheck_exception(struct pt_regs *regs)
{
	u32 inst;
	int ret;
	phys_addr_t addr = 0;

	/* Let KVM/QEMU deal with the exception */
	if (regs->msr & MSR_GS)
		return 0;

#ifdef CONFIG_PHYS_64BIT
	addr = mfspr(SPRN_MCARU);
	addr <<= 32;
#endif
	addr += mfspr(SPRN_MCAR);

	if (is_in_pci_mem_space(addr)) {
		if (user_mode(regs))
			ret = copy_from_user_nofault(&inst,
					(void __user *)regs->nip, sizeof(inst));
		else
			ret = get_kernel_nofault(inst, (void *)regs->nip);

		if (!ret && mcheck_handle_load(regs, inst)) {
			regs_add_return_ip(regs, 4);
			return 1;
		}
	}

	return 0;
}
#endif

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static const struct of_device_id pci_ids[] = {
	{ .compatible = "fsl,mpc8540-pci", },
	{ .compatible = "fsl,mpc8548-pcie", },
	{ .compatible = "fsl,mpc8610-pci", },
	{ .compatible = "fsl,mpc8641-pcie", },
	{ .compatible = "fsl,qoriq-pcie", },
	{ .compatible = "fsl,qoriq-pcie-v2.1", },
	{ .compatible = "fsl,qoriq-pcie-v2.2", },
	{ .compatible = "fsl,qoriq-pcie-v2.3", },
	{ .compatible = "fsl,qoriq-pcie-v2.4", },
	{ .compatible = "fsl,qoriq-pcie-v3.0", },

	/*
	 * The following entries are for compatibility with older device
	 * trees.
	 */
	{ .compatible = "fsl,p1022-pcie", },
	{ .compatible = "fsl,p4080-pcie", },

	{},
};

struct device_node *fsl_pci_primary;

void __init fsl_pci_assign_primary(void)
{
	struct device_node *np;

	/* Callers can specify the primary bus using other means. */
	if (fsl_pci_primary)
		return;

	/* If a PCI host bridge contains an ISA node, it's primary. */
	np = of_find_node_by_type(NULL, "isa");
	while ((fsl_pci_primary = of_get_parent(np))) {
		of_node_put(np);
		np = fsl_pci_primary;

		if (of_match_node(pci_ids, np) && of_device_is_available(np))
			return;
	}

	/*
	 * If there's no PCI host bridge with ISA then check for
	 * PCI host bridge with alias "pci0" (first PCI host bridge).
	 */
	np = of_find_node_by_path("pci0");
	if (np && of_match_node(pci_ids, np) && of_device_is_available(np)) {
		fsl_pci_primary = np;
		of_node_put(np);
		return;
	}
	if (np)
		of_node_put(np);

	/*
	 * If there's no PCI host bridge with ISA, arbitrarily
	 * designate one as primary.  This can go away once
	 * various bugs with primary-less systems are fixed.
	 */
	for_each_matching_node(np, pci_ids) {
		if (of_device_is_available(np)) {
			fsl_pci_primary = np;
			return;
		}
	}
}

#ifdef CONFIG_PM_SLEEP
static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
{
	struct pci_controller *hose = dev_id;
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;

	dr = in_be32(&pci->pex_pme_mes_dr);
	if (!dr)
		return IRQ_NONE;

	out_be32(&pci->pex_pme_mes_dr, dr);

	return IRQ_HANDLED;
}

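/*
 * Wire up the controller's PME interrupt and enable reporting of the PME
 * turn-off / L2-L3 transition events (PTOD, ENL23D, EXL23D) that the
 * suspend and resume paths below poll for.
 */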
static int fsl_pci_pme_probe(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci;
	struct pci_dev *dev;
	int pme_irq;
	int res;
	u16 pms;

	/* Get hose's pci_dev */
	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);

	/* PME Disable */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
	pms &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);

	pme_irq = irq_of_parse_and_map(hose->dn, 0);
	if (!pme_irq) {
		dev_err(&dev->dev, "Failed to map PME interrupt.\n");

		return -ENXIO;
	}

	res = devm_request_irq(hose->parent, pme_irq,
			fsl_pci_pme_handle,
			IRQF_SHARED,
			"[PCI] PME", hose);
	if (res < 0) {
		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
		irq_dispose_mapping(pme_irq);

		return -ENODEV;
	}

	pci = hose->private_data;

	/* Enable PTOD, ENL23D & EXL23D */
	clrbits32(&pci->pex_pme_mes_disr,
		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);

	out_be32(&pci->pex_pme_mes_ier, 0);
	setbits32(&pci->pex_pme_mes_ier,
		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);

	/* PME Enable */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
	pms |= PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);

	return 0;
}

static void send_pme_turnoff_message(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;
	int i;

	/* Send PME_Turn_Off Message Request */
	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);

	/* Wait turn off done */
	for (i = 0; i < 150; i++) {
		dr = in_be32(&pci->pex_pme_mes_dr);
		if (dr) {
			out_be32(&pci->pex_pme_mes_dr, dr);
			break;
		}

		udelay(1000);
	}
}

static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
{
	send_pme_turnoff_message(hose);
}

static int fsl_pci_syscore_suspend(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		fsl_pci_syscore_do_suspend(hose);

	return 0;
}

static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;
	int i;

	/* Send Exit L2 State Message */
	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);

	/* Wait exit done */
	for (i = 0; i < 150; i++) {
		dr = in_be32(&pci->pex_pme_mes_dr);
		if (dr) {
			out_be32(&pci->pex_pme_mes_dr, dr);
			break;
		}

		udelay(1000);
	}

	setup_pci_atmu(hose);
}

static void fsl_pci_syscore_resume(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		fsl_pci_syscore_do_resume(hose);
}

static struct syscore_ops pci_syscore_pm_ops = {
	.suspend = fsl_pci_syscore_suspend,
	.resume = fsl_pci_syscore_resume,
};
#endif

void fsl_pcibios_fixup_phb(struct pci_controller *phb)
{
#ifdef CONFIG_PM_SLEEP
	fsl_pci_pme_probe(phb);
#endif
}

static int add_err_dev(struct platform_device *pdev)
{
	struct platform_device *errdev;
	struct mpc85xx_edac_pci_plat_data pd = {
		.of_node = pdev->dev.of_node
	};

	errdev = platform_device_register_resndata(&pdev->dev,
						   "mpc85xx-pci-edac",
						   PLATFORM_DEVID_AUTO,
						   pdev->resource,
						   pdev->num_resources,
						   &pd, sizeof(pd));

	return PTR_ERR_OR_ZERO(errdev);
}

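/*
 * Platform-driver probe: register the host bridge, then register the
 * mpc85xx-pci-edac child device for error reporting on top of it.
 */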
static int fsl_pci_probe(struct platform_device *pdev)
{
	struct device_node *node;
	int ret;

	node = pdev->dev.of_node;
	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
	if (ret)
		return ret;

	ret = add_err_dev(pdev);
	if (ret)
		dev_err(&pdev->dev, "couldn't register error device: %d\n",
			ret);

	return 0;
}

static struct platform_driver fsl_pci_driver = {
	.driver = {
		.name = "fsl-pci",
		.of_match_table = pci_ids,
	},
	.probe = fsl_pci_probe,
	.driver_managed_dma = true,
};

static int __init fsl_pci_init(void)
{
#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&pci_syscore_pm_ops);
#endif
	return platform_driver_register(&fsl_pci_driver);
}
arch_initcall(fsl_pci_init);
#endif