GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/pci/acpi.c
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
	struct pci_bus *bus;
	int busnum;
};

static bool pci_use_crs = true;

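/*
 * set_use_crs() is the DMI callback for pci_use_crs_table[]: each entry
 * names a machine whose ACPI _CRS host bridge windows are known to be
 * needed, so matching one forces pci_use_crs back on even when the
 * BIOS-date heuristic in pci_acpi_crs_quirks() would have disabled it.
 */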
static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	{}
};

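/*
 * Decide whether host bridge windows should be taken from ACPI _CRS:
 * distrust BIOSes dated before 2008, re-enable via the DMI table above,
 * and let an explicit "pci=use_crs"/"pci=nocrs" override both.
 */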
void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}

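/*
 * Normalize a _CRS descriptor into an acpi_resource_address64.  The
 * memory24/memory32/fixed_memory32 forms are converted by hand; the
 * generic address16/32/64 forms go through acpi_resource_to_address64()
 * and are accepted only if they describe a non-empty memory or I/O range.
 */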
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}

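/*
 * First pass over _CRS: count the descriptors that resource_to_addr()
 * accepts so the caller knows how large an info->res[] array to allocate.
 */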
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}

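/*
 * Second pass over _CRS: convert each accepted descriptor into a struct
 * resource in info->res[], applying any translation_offset so the window
 * is expressed in CPU addresses.  When pci_use_crs is off the window is
 * only logged, not recorded.
 */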
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}

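/* Local helper: true if @point falls within @res, inclusive of both ends. */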
static bool resource_contains(struct resource *res, resource_size_t point)
{
	if (res->start <= point && point <= res->end)
		return true;
	return false;
}

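/*
 * Merge overlapping windows of the given type (mem or I/O) into one larger
 * window, since the kernel resource tree cannot represent overlaps; the
 * absorbed window has its flags cleared so add_resources() skips it.
 */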
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;
	struct resource *res1, *res2;

	for (i = 0; i < info->res_num; i++) {
		res1 = &info->res[i];
		if (!(res1->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			res2 = &info->res[j];
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_contains(res1, res2->start) ||
			    resource_contains(res1, res2->end) ||
			    resource_contains(res2, res1->start) ||
			    resource_contains(res2, res1->end)) {
				res1->start = min(res1->start, res2->start);
				res1->end = max(res1->end, res2->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 res1, res2);
				res2->flags = 0;
			}
		}
	}
}

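/*
 * Insert the collected windows into the iomem/ioport resource trees and
 * attach them to the root bus.  A window that collides with an existing
 * resource is reported and left out rather than forced in.
 */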
static void add_resources(struct pci_root_info *info)
{
	int i;
	struct resource *res, *root, *conflict;

	if (!pci_use_crs)
		return;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_err(&info->bridge->dev,
				"address space collision: host bridge window %pR "
				"conflicts with %s %pR\n",
				res, conflict->name, conflict);
		else
			pci_bus_add_resource(info->bus, res, 0);
	}
}

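/*
 * Read the host bridge's _CRS and, when pci_use_crs is set, replace the
 * default root bus resources with the windows it describes: one walk to
 * count them, one walk to fill in the allocated array, then
 * add_resources() to publish them.
 */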
static void
get_current_resources(struct acpi_device *device, int busnum,
		      int domain, struct pci_bus *bus)
{
	struct pci_root_info info;
	size_t size;

	if (pci_use_crs)
		pci_bus_remove_resources(bus);

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
				&info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		goto res_alloc_fail;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
				&info);

	add_resources(&info);
	return;

name_alloc_fail:
	kfree(info.res);
res_alloc_fail:
	return;
}

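/*
 * Entry point for scanning an ACPI PCI root bridge: validate the domain,
 * pick a NUMA node from the _PXM proximity info when available, set up the
 * per-root pci_sysdata, and either reuse an already-scanned bus or create
 * and scan a new one with its _CRS windows.
 */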
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired pci bus has already been scanned; in that case
	 * it is unnecessary to scan it again with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}

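/*
 * Late init: once ACPI is usable for IRQ routing, point the pcibios IRQ
 * enable/disable hooks at the ACPI versions and, with "pci=routeirq",
 * route interrupts for every device up front.
 */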
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	}

	return 0;
}