GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/pci/pci.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <[email protected]>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define pr_fmt(fmt) "zpci: " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>
#include <linux/list_sort.h>

#include <asm/machine.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static DEFINE_MUTEX(zpci_add_remove_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)
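
/*
 * Editor's note (illustrative, not part of the original file): the table
 * above is sized at ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2 entries, capped
 * at ZPCI_IOMAP_MAX_ENTRIES. With PCI_STD_NUM_BARS == 6 that works out to
 * three slots per device, apparently assuming that a function maps about
 * half of its BARs on average.
 */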

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

void zpci_zdev_put(struct zpci_dev *zdev)
{
	if (!zdev)
		return;
	mutex_lock(&zpci_add_remove_lock);
	kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock);
	mutex_unlock(&zpci_add_remove_lock);
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}
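
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * get_zdev_by_fid() returns its result with an extra reference held, so
 * callers are expected to pair it with zpci_zdev_put(), e.g.:
 *
 *	struct zpci_dev *zdev = get_zdev_by_fid(fid);
 *
 *	if (zdev) {
 *		// ... use zdev ...
 *		zpci_zdev_put(zdev);
 *	}
 */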

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

	fib.pba = base;
	/* Work around an off-by-one in the ISM virt device */
	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
		fib.pal = limit + (1 << 12);
	else
		fib.pal = limit;
	fib.iota = iota;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_iommu_ctrs *ctrs;
	struct zpci_fib fib = {0};
	unsigned long flags;
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	spin_lock_irqsave(&zdev->dom_lock, flags);
	ctrs = zpci_get_iommu_ctrs(zdev);
	if (ctrs) {
		atomic64_set(&ctrs->mapped_pages, 0);
		atomic64_set(&ctrs->unmapped_pages, 0);
		atomic64_set(&ctrs->global_rpcits, 0);
		atomic64_set(&ctrs->sync_map_rpcits, 0);
		atomic64_set(&ctrs->sync_rpcits, 0);
	}
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	int rc = -ENODEV;
	u64 data;

	if (!zdev_enabled(zdev))
		goto out_err;

	rc = __zpci_load(&data, req, offset);
	if (rc)
		goto out_err;
	data = le64_to_cpu((__force __le64)data);
	data >>= (8 - len) * 8;
	*val = (u32)data;
	return 0;

out_err:
	PCI_SET_ERROR_RESPONSE(val);
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	int rc = -ENODEV;
	u64 data = val;

	if (!zdev_enabled(zdev))
		return rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}
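
/*
 * Editor's note (worked example, not part of the original file): both
 * config accessors carry the payload in the most significant bytes of a
 * 64-bit doubleword. For a 2-byte read the shift amount is
 * (8 - 2) * 8 = 48 bits, which moves the payload into the low 16 bits of
 * 'data' before it is handed back through *val; zpci_cfg_store() mirrors
 * this with the corresponding left shift.
 */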

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   pgprot_t prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
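
/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * driver uses these helpers without caring whether the MIO or the
 * function-handle path is active underneath, e.g.:
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);
 *
 *	if (base) {
 *		u32 val = ioread32(base);
 *		// ...
 *		pci_iounmap(pdev, base);
 *	}
 */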

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	if (!zdev || zpci_cfg_load(zdev, where, val, size))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return PCIBIOS_SUCCESSFUL;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	if (!zdev || zpci_cfg_store(zdev, where, val, size))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
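
/*
 * Editor's note (illustrative, not part of the original file): pci_root_ops
 * is what the common PCI core invokes for config space access, so a generic
 * call such as
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *
 * ends up in pci_read() above and from there in zpci_cfg_load().
 */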

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}
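
/*
 * Editor's note (worked example, not part of the original file): the
 * per-BAR size field holds a log2 value, so "size = 1UL << zdev->bars[i].size"
 * turns a stored value of e.g. 20 into a 1 MiB resource (1UL << 20 bytes).
 */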

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}
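
/*
 * Editor's note (illustrative, not part of the original file): the domain
 * number chosen here becomes the leading component of the PCI address seen
 * in userspace, e.g. the "0001" in an lspci line like
 * "0001:00:00.0 Ethernet controller: ...".
 */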

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_reenable_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	if (zdev->msi_nr_irqs > 0) {
		rc = zpci_set_irq(zdev);
		if (rc)
			return rc;
	}

	rc = zpci_iommu_register_ioat(zdev, &status);
	if (rc)
		zpci_disable_device(zdev);

	return rc;
}
EXPORT_SYMBOL_GPL(zpci_reenable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the zPCI function which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently, after reset the PCI function requires re-initialization via
 * the common PCI code, including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state,
		 * the FH may indicate an enabled device while disable says
		 * the device is already disabled; don't treat that as an
		 * error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_reenable_device(zdev);

	return rc;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation, either Standby or Configured
 *
 * Allocates a new struct zpci_dev and queries the platform for its details.
 * If successful, the device can subsequently be added to the zPCI subsystem
 * using zpci_add_device().
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	return zdev;

error:
	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

/**
 * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem
 * @zdev: The zPCI device to be added
 *
 * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus,
 * creating a new one as necessary. A hotplug slot is created and events start
 * to be handled. If successful, from this point on zpci_zdev_get() and
 * zpci_zdev_put() must be used. If adding the struct zpci_dev fails, the
 * device was not added and should be freed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_add_device(struct zpci_dev *zdev)
{
	int rc;

	mutex_lock(&zpci_add_remove_lock);
	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	kref_init(&zdev->kref);
	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);
	mutex_unlock(&zpci_add_remove_lock);
	return 0;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
	mutex_unlock(&zpci_add_remove_lock);
	return rc;
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
	       state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
		return 0;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	lockdep_assert_held(&zdev->state_lock);
	/* We may declare the device reserved multiple times */
	if (zdev->state == ZPCI_FN_STATE_RESERVED)
		return;
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	/*
	 * The underlying device is gone. Allow the zdev to be freed
	 * as soon as all other references are gone by accounting for
	 * the removal as a dropped reference.
	 */
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
	__releases(&zpci_list_lock)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

	lockdep_assert_held(&zpci_add_remove_lock);
	WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED);
	/*
	 * We already hold zpci_list_lock thanks to kref_put_lock().
	 * This makes sure no new reference can be taken from the list.
	 */
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);

	if (zdev->has_resources)
		zpci_cleanup_bus_resources(zdev);

	zpci_bus_device_unregister(zdev);
	zpci_destroy_iommu(zdev);
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree_rcu(zdev, rcu);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
	BUILD_BUG_ON((CONFIG_ILLEGAL_POINTER_VALUE + 0x10000 > ZPCI_IOMAP_ADDR_BASE) &&
		     (CONFIG_ILLEGAL_POINTER_VALUE <= ZPCI_IOMAP_ADDR_MAX));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		clear_machine_feature(MFEATURE_PCI_MIO);
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}
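
/*
 * Editor's note (illustrative, not part of the original file): these tokens
 * arrive via the generic "pci=" kernel command line parameter; options the
 * common PCI code does not consume itself are passed to pcibios_setup(),
 * so booting with e.g. "pci=nomio" reaches the "nomio" branch above.
 */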

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int zpci_cmp_rid(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct zpci_dev *za = container_of(a, struct zpci_dev, entry);
	struct zpci_dev *zb = container_of(b, struct zpci_dev, entry);

	/*
	 * PCI functions without RID available maintain original order
	 * between themselves but sort before those with RID.
	 */
	if (za->rid == zb->rid)
		return za->rid_available > zb->rid_available;
	/*
	 * PCI functions with RID sort by RID ascending.
	 */
	return za->rid > zb->rid;
}
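
/*
 * Editor's note (illustrative, not part of the original file): list_sort()
 * treats a comparator result > 0 as "a sorts after b" and performs a stable
 * sort, so returning plain boolean comparisons as above suffices: for RIDs
 * 0x28 and 0x08 the comparator returns 1 and the two entries are swapped.
 */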

static void zpci_add_devices(struct list_head *scan_list)
{
	struct zpci_dev *zdev, *tmp;

	list_sort(NULL, scan_list, &zpci_cmp_rid);
	list_for_each_entry_safe(zdev, tmp, scan_list, entry) {
		list_del_init(&zdev->entry);
		if (zpci_add_device(zdev))
			kfree(zdev);
	}
}

int zpci_scan_devices(void)
{
	struct zpci_bus *zbus;
	LIST_HEAD(scan_list);
	int rc;

	rc = clp_scan_pci_devices(&scan_list);
	if (rc)
		return rc;

	zpci_add_devices(&scan_list);
	zpci_bus_for_each(zbus) {
		zpci_bus_scan_bus(zbus);
		cond_resched();
	}
	return 0;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (test_machine_feature(MFEATURE_PCI_MIO)) {
		static_branch_enable(&have_mio);
		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_scan_devices();
	if (rc)
		goto out_find;

	rc = zpci_fw_sysfs_init();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
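
/*
 * Editor's note (illustrative, not part of the original file):
 * subsys_initcall_sync() queues pci_base_init() after all plain
 * subsys_initcall() entries of the same level, so earlier s390 and PCI
 * core setup is expected to have completed by the time this runs.
 */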