Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/bcma/host_pci.c
26278 views
1
/*
2
* Broadcom specific AMBA
3
* PCI Host
4
*
5
* Licensed under the GNU/GPL. See COPYING for details.
6
*/
7
8
#include "bcma_private.h"
9
#include <linux/slab.h>
10
#include <linux/bcma/bcma.h>
11
#include <linux/pci.h>
12
#include <linux/module.h>
13
14
static void bcma_host_pci_switch_core(struct bcma_device *core)
15
{
16
int win2 = core->bus->host_is_pcie2 ?
17
BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;
18
19
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
20
core->addr);
21
pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
22
core->bus->mapped_core = core;
23
bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
24
}
25
26
/* Provides access to the requested core. Returns base offset that has to be
27
* used. It makes use of fixed windows when possible. */
28
static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
29
{
30
switch (core->id.id) {
31
case BCMA_CORE_CHIPCOMMON:
32
return 3 * BCMA_CORE_SIZE;
33
case BCMA_CORE_PCIE:
34
return 2 * BCMA_CORE_SIZE;
35
}
36
37
if (core->bus->mapped_core != core)
38
bcma_host_pci_switch_core(core);
39
return 0;
40
}
41
42
/* Read an 8-bit register of @core at @offset via the PCI MMIO window. */
static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
	u16 reg = offset + bcma_host_pci_provide_access_to_core(core);

	return ioread8(core->bus->mmio + reg);
}
47
48
/* Read a 16-bit register of @core at @offset via the PCI MMIO window. */
static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
	u16 reg = offset + bcma_host_pci_provide_access_to_core(core);

	return ioread16(core->bus->mmio + reg);
}
53
54
/* Read a 32-bit register of @core at @offset via the PCI MMIO window. */
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	u16 reg = offset + bcma_host_pci_provide_access_to_core(core);

	return ioread32(core->bus->mmio + reg);
}
59
60
static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
61
u8 value)
62
{
63
offset += bcma_host_pci_provide_access_to_core(core);
64
iowrite8(value, core->bus->mmio + offset);
65
}
66
67
/* Write @value to a 16-bit register of @core at @offset. */
static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
				  u16 value)
{
	u16 reg = offset + bcma_host_pci_provide_access_to_core(core);

	iowrite16(value, core->bus->mmio + reg);
}
73
74
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
75
u32 value)
76
{
77
offset += bcma_host_pci_provide_access_to_core(core);
78
iowrite32(value, core->bus->mmio + offset);
79
}
80
81
#ifdef CONFIG_BCMA_BLOCKIO
82
static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
83
size_t count, u16 offset, u8 reg_width)
84
{
85
void __iomem *addr = core->bus->mmio + offset;
86
if (core->bus->mapped_core != core)
87
bcma_host_pci_switch_core(core);
88
switch (reg_width) {
89
case sizeof(u8):
90
ioread8_rep(addr, buffer, count);
91
break;
92
case sizeof(u16):
93
WARN_ON(count & 1);
94
ioread16_rep(addr, buffer, count >> 1);
95
break;
96
case sizeof(u32):
97
WARN_ON(count & 3);
98
ioread32_rep(addr, buffer, count >> 2);
99
break;
100
default:
101
WARN_ON(1);
102
}
103
}
104
105
static void bcma_host_pci_block_write(struct bcma_device *core,
106
const void *buffer, size_t count,
107
u16 offset, u8 reg_width)
108
{
109
void __iomem *addr = core->bus->mmio + offset;
110
if (core->bus->mapped_core != core)
111
bcma_host_pci_switch_core(core);
112
switch (reg_width) {
113
case sizeof(u8):
114
iowrite8_rep(addr, buffer, count);
115
break;
116
case sizeof(u16):
117
WARN_ON(count & 1);
118
iowrite16_rep(addr, buffer, count >> 1);
119
break;
120
case sizeof(u32):
121
WARN_ON(count & 3);
122
iowrite32_rep(addr, buffer, count >> 2);
123
break;
124
default:
125
WARN_ON(1);
126
}
127
}
128
#endif
129
130
/* Read a 32-bit register from @core's agent/wrapper space, which is
 * mapped one core-size above the core registers in BAR0. */
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);

	return ioread32(core->bus->mmio + BCMA_CORE_SIZE + offset);
}
136
137
static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
138
u32 value)
139
{
140
if (core->bus->mapped_core != core)
141
bcma_host_pci_switch_core(core);
142
iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
143
}
144
145
/* Register-access operations handed to the bcma core for PCI-hosted buses */
static const struct bcma_host_ops bcma_host_pci_ops = {
	.read8 = bcma_host_pci_read8,
	.read16 = bcma_host_pci_read16,
	.read32 = bcma_host_pci_read32,
	.write8 = bcma_host_pci_write8,
	.write16 = bcma_host_pci_write16,
	.write32 = bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read = bcma_host_pci_block_read,
	.block_write = bcma_host_pci_block_write,
#endif
	/* agent/wrapper space accessors */
	.aread32 = bcma_host_pci_aread32,
	.awrite32 = bcma_host_pci_awrite32,
};
159
160
/* Probe a Broadcom PCI(e) bridge device: enable it, map BAR0, scan the
 * AMBA bus behind it and register the discovered cores. Uses a
 * goto-cascade so each failure path unwinds exactly what was set up.
 * Returns 0 on success or a negative errno.
 */
static int bcma_host_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	err = pci_request_regions(dev, "bcma-pci-bridge");
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev)) {
		bcma_err(bus, "PCI card detected, they are not supported.\n");
		err = -ENXIO;
		goto err_pci_release_regions;
	}

	bus->dev = &dev->dev;

	/* Map MMIO (~0UL requests the full length of BAR 0) */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
	bus->boardinfo.type = bus->host_pci->subsystem_device;

	/* Initialize struct, detect chip */
	bcma_init_bus(bus);

	/* Scan bus to find out generation of PCIe core */
	err = bcma_bus_scan(bus);
	if (err)
		goto err_pci_unmap_mmio;

	/* Gen2 hosts use different BAR0 window registers, see switch_core */
	if (bcma_find_core(bus, BCMA_CORE_PCIE2))
		bus->host_is_pcie2 = true;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_unregister_cores;

	pci_set_drvdata(dev, bus);

out:
	return err;

	/* Error unwind: each label releases one resource, in reverse
	 * order of acquisition, then falls through to the next. */
err_unregister_cores:
	bcma_unregister_cores(bus);
err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}
244
245
/* Undo everything bcma_host_pci_probe() set up, in reverse order. */
static void bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
}
255
256
#ifdef CONFIG_PM_SLEEP
257
static int bcma_host_pci_suspend(struct device *dev)
258
{
259
struct bcma_bus *bus = dev_get_drvdata(dev);
260
261
bus->mapped_core = NULL;
262
263
return bcma_bus_suspend(bus);
264
}
265
266
/* System resume: hand straight off to the generic bus resume code. */
static int bcma_host_pci_resume(struct device *dev)
{
	return bcma_bus_resume(dev_get_drvdata(dev));
}
272
273
/* Suspend/resume callbacks; see bcma_host_pci_suspend for why the
 * mapped-core cache is reset across a suspend cycle. */
static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
			 bcma_host_pci_resume);
#define BCMA_PM_OPS (&bcma_pm_ops)
276
277
#else /* CONFIG_PM_SLEEP */
278
279
#define BCMA_PM_OPS NULL
280
281
#endif /* CONFIG_PM_SLEEP */
282
283
/* Broadcom PCI(e) bridge devices this driver binds to. Some 0x4365
 * entries match on subsystem IDs because the same device ID is also
 * used by hardware handled elsewhere. */
static const struct pci_device_id bcma_pci_bridge_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },	/* 0xa8d8 */
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_HP, 0x804a) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },	/* 0xa8db, BCM43217 (sic!) */
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },	/* 0xa8dc */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
307
308
/* PCI driver glue tying the ID table to probe/remove and PM callbacks */
static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
	.driver.pm = BCMA_PM_OPS,
};
315
316
/* Register the PCI bridge driver with the PCI core. */
int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}
320
321
/* Unregister the PCI bridge driver. */
void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}
325
326
/**************************************************
327
* Runtime ops for drivers.
328
**************************************************/
329
330
/* See also pcicore_up */
331
void bcma_host_pci_up(struct bcma_bus *bus)
332
{
333
if (bus->hosttype != BCMA_HOSTTYPE_PCI)
334
return;
335
336
if (bus->host_is_pcie2)
337
bcma_core_pcie2_up(&bus->drv_pcie2);
338
else
339
bcma_core_pci_up(&bus->drv_pci[0]);
340
}
341
EXPORT_SYMBOL_GPL(bcma_host_pci_up);
342
343
/* See also pcicore_down */
344
void bcma_host_pci_down(struct bcma_bus *bus)
345
{
346
if (bus->hosttype != BCMA_HOSTTYPE_PCI)
347
return;
348
349
if (!bus->host_is_pcie2)
350
bcma_core_pci_down(&bus->drv_pci[0]);
351
}
352
EXPORT_SYMBOL_GPL(bcma_host_pci_down);
353
354
/* See also si_pci_setup */
355
int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
356
bool enable)
357
{
358
struct pci_dev *pdev;
359
u32 coremask, tmp;
360
int err = 0;
361
362
if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
363
/* This bcma device is not on a PCI host-bus. So the IRQs are
364
* not routed through the PCI core.
365
* So we must not enable routing through the PCI core. */
366
goto out;
367
}
368
369
pdev = bus->host_pci;
370
371
err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
372
if (err)
373
goto out;
374
375
coremask = BIT(core->core_index) << 8;
376
if (enable)
377
tmp |= coremask;
378
else
379
tmp &= ~coremask;
380
381
err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
382
383
out:
384
return err;
385
}
386
EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
387
388