1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright IBM Corp. 2012
4
*
5
* Author(s):
6
* Jan Glauber <[email protected]>
7
*/
8
9
#define pr_fmt(fmt) "zpci: " fmt
10
11
#include <linux/kernel.h>
12
#include <linux/miscdevice.h>
13
#include <linux/slab.h>
14
#include <linux/err.h>
15
#include <linux/delay.h>
16
#include <linux/pci.h>
17
#include <linux/uaccess.h>
18
#include <asm/asm-extable.h>
19
#include <asm/pci_debug.h>
20
#include <asm/pci_clp.h>
21
#include <asm/asm.h>
22
#include <asm/clp.h>
23
#include <uapi/asm/clp.h>
24
25
#include "pci_bus.h"
26
27
/* True when the platform reported that PCI function UIDs are unique. */
bool zpci_unique_uid;

/*
 * update_uid_checking() - record whether UID checking is in effect
 * @new: state reported by the most recent CLP List PCI response
 *
 * Emits a debug message only when the state changes, then stores it.
 */
void update_uid_checking(bool new)
{
	if (zpci_unique_uid != new)
		zpci_dbg(3, "uid checking:%d\n", new);

	zpci_unique_uid = new;
}
36
37
/*
 * zpci_err_clp() - log a failed CLP request to the zpci error debug feature
 * @rsp: response code taken from the CLP response header
 * @rc: Linux return value of the CLP request
 *
 * Both values are packed together and dumped as hex so they can be
 * correlated when inspecting the s390 debug feature.
 */
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}
46
47
/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	int cc, exception;

	/*
	 * Assume an exception occurred; only the successful fall-through
	 * path after the CLP instruction clears the flag (see EX_TABLE).
	 */
	exception = 1;
	asm_inline volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [mask] "=d" (mask), [exc] "+d" (exception)
		: [cmd] "a" (1)
		: CC_CLOBBER);
	*ilp = mask;
	/* An exception is reported to the caller as condition code 3. */
	return exception ? 3 : CC_TRANSFORM(cc);
}
69
70
/*
 * Call Logical Processor with c=0, the give constant lps and an lpcb request.
 */
static __always_inline int clp_req(void *data, unsigned int lps)
{
	/* The request/response block is one CLP_BLK_SIZE'd memory area. */
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	int cc, exception;
	u64 ignored;

	/*
	 * Assume an exception occurred; only the successful fall-through
	 * path after the CLP instruction clears the flag (see EX_TABLE).
	 */
	exception = 1;
	asm_inline volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [ign] "=d" (ignored), "+m" (*req), [exc] "+d" (exception)
		: [req] "a" (req), [lps] "i" (lps)
		: CC_CLOBBER);
	/* An exception is reported to the caller as condition code 3. */
	return exception ? 3 : CC_TRANSFORM(cc);
}
91
92
/*
 * clp_alloc_block() - allocate a page-aligned CLP request/response block
 * @gfp_mask: allocation flags passed through to the page allocator
 *
 * Returns the block address, or NULL on allocation failure.
 */
static void *clp_alloc_block(gfp_t gfp_mask)
{
	unsigned long block;

	block = __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
	return (void *)block;
}
96
97
/*
 * clp_free_block() - free a block obtained from clp_alloc_block()
 * @ptr: block address (may be NULL; free_pages() tolerates 0)
 */
static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}
101
102
/*
 * clp_store_query_pci_fngrp() - copy a Query PCI Function Group response
 * @zdev: device whose function-group attributes are updated
 * @response: CLP response block to copy from
 */
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;
	zdev->version = response->version;
	zdev->maxstbl = response->maxstbl;
	zdev->dtsm = response->dtsm;
	zdev->rtr_avail = response->rtr;

	/* Only group version 1 maps to a known PCIe bus speed. */
	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}
124
125
/*
 * clp_query_pci_fngrp() - query a PCI function group and store its attributes
 * @zdev: device to update with the group attributes
 * @pfgid: PCI function group ID to query
 *
 * Return: 0 on success, -ENOMEM if the request block could not be
 * allocated, -EIO on any CLP failure.
 */
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}
151
152
/*
 * clp_store_query_pci_fn() - copy a Query PCI Function response into @zdev
 * @zdev: device to update
 * @response: CLP response block to copy from
 *
 * Return: always 0.
 */
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->port = response->port;
	zdev->fidparm = response->fidparm;
	zdev->uid = response->uid;
	/* fmb_len is given in units of u32 words. */
	zdev->fmb_length = sizeof(u32) * response->fmb_len;
	zdev->is_physfn = response->is_physfn;
	zdev->rid_available = response->rid_avail;
	/* RID and TID are only valid when their avail bits are set. */
	if (zdev->rid_available)
		zdev->rid = response->rid;
	zdev->tid_avail = response->tid_avail;
	if (zdev->tid_avail)
		zdev->tid = response->tid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
		zdev->util_str_avail = 1;
	}
	zdev->mio_capable = response->mio_addr_avail;
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		/* The valid mask is left-to-right: bit 0 refers to BAR 0. */
		if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
			continue;

		zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
		zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
	}
	return 0;
}
195
196
/*
 * clp_query_pci_fn() - query a PCI function and its function group
 * @zdev: device to query; zdev->fh selects the function
 *
 * Stores the function attributes in @zdev and then queries the function
 * group reported in the response.
 *
 * Return: 0 on success, -ENOMEM if the request block could not be
 * allocated, -EIO on any CLP failure.
 */
int clp_query_pci_fn(struct zpci_dev *zdev)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = zdev->fh;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}
226
227
/**
 * clp_set_pci_fn() - Execute a command on a PCI function
 * @zdev: Function that will be affected
 * @fh: Out parameter for updated function handle
 * @nr_dma_as: DMA address space number
 * @command: The command code to execute
 *
 * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
 * > 0 for non-success platform responses
 */
static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;
	u32 gisa = 0;

	*fh = 0;
	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	/* The GISA designation is not passed along when disabling. */
	if (command != CLP_SET_DISABLE_PCI_FN)
		gisa = zdev->gisa;

	/* Retry while the platform reports busy, up to 100 * 20ms. */
	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = zdev->fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;
		rrb->request.gisa = gisa;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		*fh = rrb->response.fh;
	} else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		/* Report the (positive) platform response when rc is clean. */
		if (!rc)
			rc = rrb->response.hdr.rsp;
	}
	clp_free_block(rrb);
	return rc;
}
281
282
/*
 * clp_setup_writeback_mio() - discover the write-back MIO address bit
 *
 * Issues a Store Logical Processor Characteristics (SLPC) request and, if
 * write-back is supported, records the reported bit position in
 * mio_wb_bit_mask.
 *
 * Return: 0 on success (whether or not write-back is available), -ENOMEM
 * if the request block could not be allocated, -EIO on any CLP failure.
 */
int clp_setup_writeback_mio(void)
{
	struct clp_req_rsp_slpc_pci *rrb;
	u8 wb_bit_pos;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_SLPC;
	rrb->response.hdr.len = sizeof(rrb->response);

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		if (rrb->response.vwb) {
			wb_bit_pos = rrb->response.mio_wb;
			set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
			zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
		} else {
			zpci_dbg(3, "wb bit: n.a.\n");
		}

	} else {
		zpci_err("SLPC PCI:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}
315
316
/*
 * clp_enable_fh() - enable a PCI function, optionally with MIO
 * @zdev: function to enable
 * @fh: out parameter for the updated function handle
 * @nr_dma_as: number of DMA address spaces to request
 *
 * If the function uses MIO instructions, MIO is enabled in a second step;
 * on failure of that step the function is disabled again.
 *
 * Return: 0 on success, otherwise the clp_set_pci_fn() error.
 */
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
{
	int rc;

	rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
	if (!rc && zpci_use_mio(zdev)) {
		rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
		zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
				zdev->fid, *fh, rc);
		/* Roll back the plain enable if MIO enable failed. */
		if (rc)
			clp_disable_fh(zdev, fh);
	}
	return rc;
}
331
332
/*
 * clp_disable_fh() - disable a PCI function
 * @zdev: function to disable
 * @fh: out parameter for the updated function handle
 *
 * A function that is already disabled is left alone.
 *
 * Return: 0 on success (or if already disabled), otherwise the
 * clp_set_pci_fn() error.
 */
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
{
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
	return rc;
}
343
344
/*
 * clp_list_pci_req() - issue one CLP List PCI request
 * @rrb: request/response block to (re)use
 * @resume_token: in/out; 0 starts a new listing, on return holds the token
 *	for the next chunk (0 when the listing is complete)
 * @nentries: out; number of function-handle entries in this response
 *
 * Return: 0 on success, -EIO on any CLP failure.
 */
static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
			    u64 *resume_token, int *nentries)
{
	int rc;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_LIST_PCI;
	/* store as many entries as possible */
	rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
	rrb->request.resume_token = *resume_token;

	/* Get PCI function handle list */
	rc = clp_req(rrb, CLP_LPS_PCI);
	if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
		zpci_err("List PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		return -EIO;
	}

	update_uid_checking(rrb->response.uid_checking);
	/* The entry layout must match what the platform reports. */
	WARN_ON_ONCE(rrb->response.entry_size !=
		sizeof(struct clp_fh_list_entry));

	*nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
		rrb->response.entry_size;
	*resume_token = rrb->response.resume_token;

	return rc;
}
374
375
/*
 * clp_list_pci() - iterate over all PCI functions reported by CLP
 * @rrb: request/response block to use for the listing
 * @data: opaque cookie passed to @cb
 * @cb: callback invoked for every function-handle list entry
 *
 * Follows resume tokens until the listing is complete.
 *
 * Return: 0 on success, otherwise the clp_list_pci_req() error.
 */
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
			void (*cb)(struct clp_fh_list_entry *, void *))
{
	u64 resume_token = 0;
	int nentries, i, rc;

	do {
		rc = clp_list_pci_req(rrb, &resume_token, &nentries);
		if (rc)
			return rc;
		for (i = 0; i < nentries; i++)
			cb(&rrb->response.fh_list[i], data);
	} while (resume_token);

	return rc;
}
391
392
/*
 * clp_find_pci() - search the CLP PCI function list for a given FID
 * @rrb: request/response block to use for the listing
 * @fid: function ID to look for
 * @entry: out; copy of the matching list entry
 *
 * Return: 0 when found, -ENODEV when the listing completes without a
 * match, otherwise the clp_list_pci_req() error.
 */
static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
			struct clp_fh_list_entry *entry)
{
	struct clp_fh_list_entry *fh_list;
	u64 resume_token = 0;
	int nentries, i, rc;

	do {
		rc = clp_list_pci_req(rrb, &resume_token, &nentries);
		if (rc)
			return rc;
		fh_list = rrb->response.fh_list;
		for (i = 0; i < nentries; i++) {
			if (fh_list[i].fid == fid) {
				*entry = fh_list[i];
				return 0;
			}
		}
	} while (resume_token);

	return -ENODEV;
}
414
415
/*
 * __clp_add() - clp_list_pci() callback that collects new devices
 * @entry: function-handle list entry reported by CLP
 * @data: struct list_head of the scan list to append new devices to
 *
 * Entries without a vendor ID and functions that already have a zpci_dev
 * are skipped; otherwise a new zpci_dev is created and queued.
 */
static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
	struct list_head *scan_list = data;
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (zdev) {
		/* Already known; drop the reference taken by the lookup. */
		zpci_zdev_put(zdev);
		return;
	}
	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
	if (IS_ERR(zdev))
		return;
	list_add_tail(&zdev->entry, scan_list);
}
433
434
/*
 * clp_scan_pci_devices() - discover all PCI functions via CLP List PCI
 * @scan_list: list to which newly created zpci_dev entries are appended
 *
 * Return: 0 on success, -ENOMEM if the request block could not be
 * allocated, otherwise the clp_list_pci() error.
 */
int clp_scan_pci_devices(struct list_head *scan_list)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, scan_list, __clp_add);

	clp_free_block(rrb);
	return rc;
}
448
449
/*
 * Get the current function handle of the function matching @fid
 */
int clp_refresh_fh(u32 fid, u32 *fh)
{
	struct clp_req_rsp_list_pci *rrb;
	struct clp_fh_list_entry entry;
	int rc;

	/*
	 * GFP_NOWAIT: this can be called from contexts where sleeping
	 * allocations are not acceptable.
	 */
	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_find_pci(rrb, fid, &entry);
	if (!rc)
		*fh = entry.fh;

	clp_free_block(rrb);
	return rc;
}
469
470
/*
 * clp_get_state() - get the configuration state of the function @fid
 * @fid: function ID to look up
 * @state: out; the reported state, or ZPCI_FN_STATE_RESERVED when the
 *	function is not in the CLP list
 *
 * Return: 0 on success (including the not-found/reserved case), -ENOMEM
 * if the request block could not be allocated, otherwise the
 * clp_find_pci() error.
 */
int clp_get_state(u32 fid, enum zpci_state *state)
{
	struct clp_req_rsp_list_pci *rrb;
	struct clp_fh_list_entry entry;
	int rc;

	/* May be called from atomic context, hence GFP_ATOMIC. */
	rrb = clp_alloc_block(GFP_ATOMIC);
	if (!rrb)
		return -ENOMEM;

	rc = clp_find_pci(rrb, fid, &entry);
	if (!rc) {
		*state = entry.config_state;
	} else if (rc == -ENODEV) {
		/* Not listed anymore: treat the function as reserved. */
		*state = ZPCI_FN_STATE_RESERVED;
		rc = 0;
	}

	clp_free_block(rrb);
	return rc;
}
491
492
/*
 * clp_base_slpc() - forward a user-space SLPC request (base LPS)
 * @req: user request descriptor (unused here)
 * @lpcb: copied-in request/response block to validate and issue
 *
 * Return: 0 on success, -EINVAL on malformed lengths, -EOPNOTSUPP when
 * the CLP request itself fails.
 */
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}
501
502
/*
 * clp_base_command() - dispatch a user-space CLP command for the base LPS
 * @req: user request descriptor
 * @lpcb: copied-in request block; cmd selects the handler
 *
 * Return: handler result, or -EINVAL for unknown commands.
 */
static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_base_slpc(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}
511
512
/*
 * clp_pci_slpc() - forward a user-space SLPC request (PCI LPS)
 * @req: user request descriptor (unused here)
 * @lpcb: copied-in request/response block to validate and issue
 *
 * Return: 0 on success, -EINVAL on malformed lengths, -EOPNOTSUPP when
 * the CLP request itself fails.
 */
static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
521
522
/*
 * clp_pci_list() - forward a user-space List PCI request
 * @req: user request descriptor (unused here)
 * @lpcb: copied-in request/response block to validate and issue
 *
 * Reserved fields must be zero to reject malformed requests.
 *
 * Return: 0 on success, -EINVAL on malformed input, -EOPNOTSUPP when the
 * CLP request itself fails.
 */
static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
533
534
/*
 * clp_pci_query() - forward a user-space Query PCI Function request
 * @req: user request descriptor (unused here)
 * @lpcb: copied-in request/response block to validate and issue
 *
 * Reserved fields must be zero to reject malformed requests.
 *
 * Return: 0 on success, -EINVAL on malformed input, -EOPNOTSUPP when the
 * CLP request itself fails.
 */
static int clp_pci_query(struct clp_req *req,
			 struct clp_req_rsp_query_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
546
547
/*
 * clp_pci_query_grp() - forward a user-space Query PCI Function Group request
 * @req: user request descriptor (unused here)
 * @lpcb: copied-in request/response block to validate and issue
 *
 * Reserved fields must be zero to reject malformed requests.
 *
 * Return: 0 on success, -EINVAL on malformed input, -EOPNOTSUPP when the
 * CLP request itself fails.
 */
static int clp_pci_query_grp(struct clp_req *req,
			     struct clp_req_rsp_query_pci_grp *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
	    lpcb->request.reserved4 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}
560
561
/*
 * clp_pci_command() - dispatch a user-space CLP command for the PCI LPS
 * @req: user request descriptor
 * @lpcb: copied-in request block; cmd selects the handler
 *
 * Return: handler result, or -EINVAL for unknown commands.
 */
static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_pci_slpc(req, (void *) lpcb);
	case 0x0002: /* list PCI functions */
		return clp_pci_list(req, (void *) lpcb);
	case 0x0003: /* query PCI function */
		return clp_pci_query(req, (void *) lpcb);
	case 0x0004: /* query PCI function group */
		return clp_pci_query_grp(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}
576
577
/*
 * clp_normal_command() - handle a user-space CLP request with c=0
 * @req: user request descriptor; lps selects base (0) or PCI (2)
 *
 * Copies the request block in from user space, validates the header,
 * dispatches to the LPS-specific handler and copies the result back.
 *
 * Return: 0 on success, -EINVAL on malformed input, -ENOMEM on
 * allocation failure, -EFAULT on copy failures, or the handler's error.
 */
static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	rc = -EINVAL;
	/* Only the base (0) and PCI (2) logical-processor sets are handled. */
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	}
	if (rc)
		goto out_free;

	rc = -EFAULT;
	/* Copy the whole block back so user space sees the response. */
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}
623
624
/*
 * clp_immediate_command() - handle a user-space CLP request with c=1
 * @req: user request descriptor; cmd 0 tests one processor, cmd 1
 *	returns the full installed-processor bit mask
 *
 * Return: 0 on success, -EINVAL for unknown commands or when the ILP
 * mask cannot be obtained, or the put_user() error.
 */
static int clp_immediate_command(struct clp_req *req)
{
	void __user *uptr;
	unsigned long ilp;
	int exists;

	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
		return -EINVAL;

	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (req->cmd == 0) {
		/* Command code 0: test for a specific processor */
		exists = test_bit_inv(req->lps, &ilp);
		return put_user(exists, (int __user *) uptr);
	}
	/* Command code 1: return bit mask of installed processors */
	return put_user(ilp, (unsigned long __user *) uptr);
}
642
643
/*
 * clp_misc_ioctl() - ioctl entry point of the /dev/clp misc device
 * @filp: open file (unused)
 * @cmd: must be CLP_SYNC
 * @arg: user pointer to a struct clp_req
 *
 * Validates the request and dispatches to the immediate (c=1) or normal
 * (c=0) command handler.
 *
 * Return: handler result, -EINVAL on bad command/reserved bits, -EFAULT
 * on copy failure.
 */
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct clp_req req;
	void __user *argp;

	if (cmd != CLP_SYNC)
		return -EINVAL;

	argp = (void __user *)arg;
	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;
	/* Reserved bits must be zero. */
	if (req.r != 0)
		return -EINVAL;
	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}
659
660
/*
 * clp_misc_release() - release handler of the /dev/clp misc device
 *
 * No per-open state exists, so there is nothing to clean up.
 */
static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}
664
665
/* File operations of the /dev/clp character device. */
static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
};

/* Misc device exposing CLP requests to user space as /dev/clp. */
static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};

/* Register the device at boot; this code is always built in. */
builtin_misc_device(clp_misc_device);
679
680