// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <[email protected]>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>
#include <linux/list_sort.h>

#include <asm/machine.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static DEFINE_MUTEX(zpci_add_remove_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

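/*
 * Size of the iomap table below: capped at ZPCI_IOMAP_MAX_ENTRIES; the
 * division by two assumes that, on average, at most half of the standard
 * BARs of each function are mapped at the same time.
 */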
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

void zpci_zdev_put(struct zpci_dev *zdev)
{
	if (!zdev)
		return;
	mutex_lock(&zpci_add_remove_lock);
	kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock);
	mutex_unlock(&zpci_add_remove_lock);
}

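/*
 * Look up a zpci device by its function ID. On success a reference is
 * taken which the caller must drop again via zpci_zdev_put().
 */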
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

	fib.pba = base;
	/* Work around off by one in ISM virt device */
	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
		fib.pal = limit + (1 << 12);
	else
		fib.pal = limit;
	fib.iota = iota;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_iommu_ctrs *ctrs;
	struct zpci_fib fib = {0};
	unsigned long flags;
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	spin_lock_irqsave(&zdev->dom_lock, flags);
	ctrs = zpci_get_iommu_ctrs(zdev);
	if (ctrs) {
		atomic64_set(&ctrs->mapped_pages, 0);
		atomic64_set(&ctrs->unmapped_pages, 0);
		atomic64_set(&ctrs->global_rpcits, 0);
		atomic64_set(&ctrs->sync_map_rpcits, 0);
		atomic64_set(&ctrs->sync_rpcits, 0);
	}
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

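/*
 * Low level config space accessors backing the pci_root_ops callbacks
 * below. The PCI load/store instructions transfer up to 8 bytes; the
 * byte swap plus the shift by (8 - len) * 8 align the len-byte value
 * within the 64-bit transfer.
 */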
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   pgprot_t prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

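/*
 * With MIO support a BAR is mapped like ordinary MMIO via ioremap();
 * without it, a cookie encoding an index into zpci_iomap_start is handed
 * out instead and decoded again by the ioread/iowrite helpers.
 */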
void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

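/* pci_ops for the root bus, backed by the config space accessors above */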
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

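/*
 * Allocate an index into the global iomap table. The entry itself is
 * only filled in once the corresponding BAR is mapped via pci_iomap().
 */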
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

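/*
 * Update the cached function handle of a zpci device. For devices that
 * are mapped through the iomap table the handle used by ioread/iowrite
 * must be kept in sync as well.
 */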
void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

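/*
 * Domain number handling: with unique UIDs the UID is registered as the
 * domain number, otherwise domain numbers are handed out from a bitmap
 * in ascending order.
 */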
static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

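/*
 * Enable the PCI function for load/store access and DMA via a CLP enable
 * request; on success the returned function handle becomes the active one.
 */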
int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_reenable_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	rc = zpci_iommu_register_ioat(zdev, &status);
	if (rc)
		zpci_disable_device(zdev);

	return rc;
}
EXPORT_SYMBOL_GPL(zpci_reenable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently, after reset the PCI function requires re-initialization via the
 * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled; don't treat it as an error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_reenable_device(zdev);

	return rc;
}

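/*
 * Callers normally reach zpci_hot_reset_device() through the common PCI
 * core, which adds the required save/restore and locking, e.g. (sketch):
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "reset failed\n");
 */
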
/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation, either Standby or Configured
 *
 * Allocates a new struct zpci_dev and queries the platform for its details.
 * If successful the device can subsequently be added to the zPCI subsystem
 * using zpci_add_device().
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	return zdev;

error:
	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

/**
 * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem
 * @zdev: The zPCI device to be added
 *
 * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus, creating
 * a new one as necessary. A hotplug slot is created and events start to be handled.
 * If successful, from this point on zpci_zdev_get() and zpci_zdev_put() must be used.
 * If adding the struct zpci_dev fails, the device was not added and should be freed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_add_device(struct zpci_dev *zdev)
{
	int rc;

	mutex_lock(&zpci_add_remove_lock);
	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	kref_init(&zdev->kref);
	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);
	mutex_unlock(&zpci_add_remove_lock);
	return 0;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
	mutex_unlock(&zpci_add_remove_lock);
	return rc;
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
		return 0;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	lockdep_assert_held(&zdev->state_lock);
	/* We may declare the device reserved multiple times */
	if (zdev->state == ZPCI_FN_STATE_RESERVED)
		return;
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	/*
	 * The underlying device is gone. Allow the zdev to be freed
	 * as soon as all other references are gone by accounting for
	 * the removal as a dropped reference.
	 */
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

	lockdep_assert_held(&zpci_add_remove_lock);
	WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED);
	/*
	 * We already hold zpci_list_lock thanks to kref_put_lock().
	 * This makes sure no new reference can be taken from the list.
	 */
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);

	if (zdev->has_resources)
		zpci_cleanup_bus_resources(zdev);

	zpci_bus_device_unregister(zdev);
	zpci_destroy_iommu(zdev);
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

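/* Allocate the FMB cache as well as the iomap table and its bitmap */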
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

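/*
 * Early "pci=" parameters recognized on s390: "off" disables PCI probing,
 * "nomio" clears the MIO machine feature so the classic PCI instructions
 * are used, "force_floating" forces floating rather than directed
 * interrupts and "norid" ignores RID information from the platform.
 */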
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		clear_machine_feature(MFEATURE_PCI_MIO);
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int zpci_cmp_rid(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct zpci_dev *za = container_of(a, struct zpci_dev, entry);
	struct zpci_dev *zb = container_of(b, struct zpci_dev, entry);

	/*
	 * PCI functions without RID available maintain original order
	 * between themselves but sort before those with RID.
	 */
	if (za->rid == zb->rid)
		return za->rid_available > zb->rid_available;
	/*
	 * PCI functions with RID sort by RID ascending.
	 */
	return za->rid > zb->rid;
}

static void zpci_add_devices(struct list_head *scan_list)
{
	struct zpci_dev *zdev, *tmp;

	list_sort(NULL, scan_list, &zpci_cmp_rid);
	list_for_each_entry_safe(zdev, tmp, scan_list, entry) {
		list_del_init(&zdev->entry);
		if (zpci_add_device(zdev))
			kfree(zdev);
	}
}

int zpci_scan_devices(void)
{
	LIST_HEAD(scan_list);
	int rc;

	rc = clp_scan_pci_devices(&scan_list);
	if (rc)
		return rc;

	zpci_add_devices(&scan_list);
	zpci_bus_scan_busses();
	return 0;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (test_machine_feature(MFEATURE_PCI_MIO)) {
		static_branch_enable(&have_mio);
		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_scan_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);