// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation
 * Author: Johannes Berg <[email protected]>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
#include <linux/virtio_pcidev.h>
#include <linux/virtio-uml.h>
#include <linux/delay.h>
#include <linux/msi.h>
#include <linux/unaligned.h>
#include <irq_kern.h>

#include "virt-pci.h"

#define to_virtio_pcidev(_pdev) \
	container_of(_pdev, struct virtio_pcidev_device, pdev)

/* for MSI-X we have a 32-bit payload */
#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
#define NUM_IRQ_MSGS	10

struct virtio_pcidev_message_buffer {
	struct virtio_pcidev_msg hdr;
	u8 data[8];
};

struct virtio_pcidev_device {
	struct um_pci_device pdev;
	struct virtio_device *vdev;

	struct virtqueue *cmd_vq, *irq_vq;

#define VIRTIO_PCIDEV_WRITE_BUFS	20
	struct virtio_pcidev_message_buffer bufs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
	void *extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
	DECLARE_BITMAP(used_bufs, VIRTIO_PCIDEV_WRITE_BUFS);

#define VIRTIO_PCIDEV_STAT_WAITING	0
	unsigned long status;

	bool platform;
};

static unsigned int virtio_pcidev_max_delay_us = 40000;
module_param_named(max_delay_us, virtio_pcidev_max_delay_us, uint, 0644);

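/*
 * Command buffers come from a small per-device pool: bufs[0..19] are tracked
 * via the used_bufs bitmap, and the last slot (index VIRTIO_PCIDEV_WRITE_BUFS)
 * is a shared fallback.  When only the fallback slot is left, *posted is
 * forced to false so the caller waits for completion before reusing it.
 */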
static int virtio_pcidev_get_buf(struct virtio_pcidev_device *dev, bool *posted)
{
	int i;

	for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
		if (!test_and_set_bit(i, dev->used_bufs))
			return i;
	}

	*posted = false;
	return VIRTIO_PCIDEV_WRITE_BUFS;
}

static void virtio_pcidev_free_buf(struct virtio_pcidev_device *dev, void *buf)
{
	int i;

	if (buf == &dev->bufs[VIRTIO_PCIDEV_WRITE_BUFS]) {
		kfree(dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS]);
		dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS] = NULL;
		return;
	}

	for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
		if (buf == &dev->bufs[i]) {
			kfree(dev->extra_ptrs[i]);
			dev->extra_ptrs[i] = NULL;
			WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
			return;
		}
	}

	WARN_ON(1);
}

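/*
 * Send a command to the device over the "cmd" virtqueue.  Write-type ops
 * (config space, MMIO write and memset) are posted: the buffer is queued and
 * we return without waiting.  Everything else is polled to completion with
 * udelay(), bounded by the max_delay_us module parameter.  Small read
 * results are bounced through buf->data so that "out" may live on the stack.
 */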
static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev,
				  struct virtio_pcidev_msg *cmd,
				  unsigned int cmd_size,
				  const void *extra, unsigned int extra_size,
				  void *out, unsigned int out_size)
{
	struct scatterlist out_sg, extra_sg, in_sg;
	struct scatterlist *sgs_list[] = {
		[0] = &out_sg,
		[1] = extra ? &extra_sg : &in_sg,
		[2] = extra ? &in_sg : NULL,
	};
	struct virtio_pcidev_message_buffer *buf;
	int delay_count = 0;
	bool bounce_out;
	int ret, len;
	int buf_idx;
	bool posted;

	if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
		return -EINVAL;

	switch (cmd->op) {
	case VIRTIO_PCIDEV_OP_CFG_WRITE:
	case VIRTIO_PCIDEV_OP_MMIO_WRITE:
	case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
		/* in PCI, writes are posted, so don't wait */
		posted = !out;
		WARN_ON(!posted);
		break;
	default:
		posted = false;
		break;
	}

	bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
		     out && out_size <= sizeof(buf->data);

	buf_idx = virtio_pcidev_get_buf(dev, &posted);
	buf = &dev->bufs[buf_idx];
	memcpy(buf, cmd, cmd_size);

	if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
		dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
						   GFP_ATOMIC);

		if (!dev->extra_ptrs[buf_idx]) {
			virtio_pcidev_free_buf(dev, buf);
			return -ENOMEM;
		}
		extra = dev->extra_ptrs[buf_idx];
	} else if (extra && extra_size <= sizeof(buf) - cmd_size) {
		memcpy((u8 *)buf + cmd_size, extra, extra_size);
		cmd_size += extra_size;
		extra_size = 0;
		extra = NULL;
		cmd = (void *)buf;
	} else {
		cmd = (void *)buf;
	}

	sg_init_one(&out_sg, cmd, cmd_size);
	if (extra)
		sg_init_one(&extra_sg, extra, extra_size);
	/* allow stack for small buffers */
	if (bounce_out)
		sg_init_one(&in_sg, buf->data, out_size);
	else if (out)
		sg_init_one(&in_sg, out, out_size);

	/* add to internal virtio queue */
	ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
				extra ? 2 : 1,
				out ? 1 : 0,
				cmd, GFP_ATOMIC);
	if (ret) {
		virtio_pcidev_free_buf(dev, buf);
		return ret;
	}

	if (posted) {
		virtqueue_kick(dev->cmd_vq);
		return 0;
	}

	/* kick and poll for getting a response on the queue */
	set_bit(VIRTIO_PCIDEV_STAT_WAITING, &dev->status);
	virtqueue_kick(dev->cmd_vq);
	ret = 0;

	while (1) {
		void *completed = virtqueue_get_buf(dev->cmd_vq, &len);

		if (completed == buf)
			break;

		if (completed)
			virtio_pcidev_free_buf(dev, completed);

		if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
			      ++delay_count > virtio_pcidev_max_delay_us,
			      "um virt-pci delay: %d", delay_count)) {
			ret = -EIO;
			break;
		}
		udelay(1);
	}
	clear_bit(VIRTIO_PCIDEV_STAT_WAITING, &dev->status);

	if (bounce_out)
		memcpy(out, buf->data, out_size);

	virtio_pcidev_free_buf(dev, buf);

	return ret;
}

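/*
 * Config space accessors: each access becomes a single
 * VIRTIO_PCIDEV_OP_CFG_READ/WRITE message carrying at most 8 data bytes.
 * A failed read returns all-ones, as for an aborted PCI access.
 */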
static unsigned long virtio_pcidev_cfgspace_read(struct um_pci_device *pdev,
						 unsigned int offset, int size)
{
	struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_CFG_READ,
		.size = size,
		.addr = offset,
	};
	/* max 8, we might not use it all */
	u8 data[8];

	memset(data, 0xff, sizeof(data));

	/* size has been checked in um_pci_cfgspace_read() */
	if (virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
		return ULONG_MAX;

	switch (size) {
	case 1:
		return data[0];
	case 2:
		return le16_to_cpup((void *)data);
	case 4:
		return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
	case 8:
		return le64_to_cpup((void *)data);
#endif
	default:
		return ULONG_MAX;
	}
}

static void virtio_pcidev_cfgspace_write(struct um_pci_device *pdev,
					 unsigned int offset, int size,
					 unsigned long val)
{
	struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
	struct {
		struct virtio_pcidev_msg hdr;
		/* maximum size - we may only use parts of it */
		u8 data[8];
	} msg = {
		.hdr = {
			.op = VIRTIO_PCIDEV_OP_CFG_WRITE,
			.size = size,
			.addr = offset,
		},
	};

	/* size has been checked in um_pci_cfgspace_write() */
	switch (size) {
	case 1:
		msg.data[0] = (u8)val;
		break;
	case 2:
		put_unaligned_le16(val, (void *)msg.data);
		break;
	case 4:
		put_unaligned_le32(val, (void *)msg.data);
		break;
#ifdef CONFIG_64BIT
	case 8:
		put_unaligned_le64(val, (void *)msg.data);
		break;
#endif
	}

	WARN_ON(virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
}

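/*
 * BAR (MMIO) accessors: bar_read/bar_write handle 1-8 byte accesses through
 * a small stack buffer, while bar_copy_from/bar_copy_to transfer arbitrary
 * lengths by handing the caller's buffer to virtio_pcidev_send_cmd().
 */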
static void virtio_pcidev_bar_copy_from(struct um_pci_device *pdev,
					int bar, void *buffer,
					unsigned int offset, int size)
{
	struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_MMIO_READ,
		.bar = bar,
		.size = size,
		.addr = offset,
	};

	memset(buffer, 0xff, size);

	virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
}

static unsigned long virtio_pcidev_bar_read(struct um_pci_device *pdev, int bar,
					    unsigned int offset, int size)
{
	/* 8 is maximum size - we may only use parts of it */
	u8 data[8];

	/* size has been checked in um_pci_bar_read() */
	virtio_pcidev_bar_copy_from(pdev, bar, data, offset, size);

	switch (size) {
	case 1:
		return data[0];
	case 2:
		return le16_to_cpup((void *)data);
	case 4:
		return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
	case 8:
		return le64_to_cpup((void *)data);
#endif
	default:
		return ULONG_MAX;
	}
}

static void virtio_pcidev_bar_copy_to(struct um_pci_device *pdev,
				      int bar, unsigned int offset,
				      const void *buffer, int size)
{
	struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
		.bar = bar,
		.size = size,
		.addr = offset,
	};

	virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
}

static void virtio_pcidev_bar_write(struct um_pci_device *pdev, int bar,
				    unsigned int offset, int size,
				    unsigned long val)
{
	/* maximum size - we may only use parts of it */
	u8 data[8];

	/* size has been checked in um_pci_bar_write() */
	switch (size) {
	case 1:
		data[0] = (u8)val;
		break;
	case 2:
		put_unaligned_le16(val, (void *)data);
		break;
	case 4:
		put_unaligned_le32(val, (void *)data);
		break;
#ifdef CONFIG_64BIT
	case 8:
		put_unaligned_le64(val, (void *)data);
		break;
#endif
	}

	virtio_pcidev_bar_copy_to(pdev, bar, offset, data, size);
}

static void virtio_pcidev_bar_set(struct um_pci_device *pdev, int bar,
				  unsigned int offset, u8 value, int size)
{
	struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
	struct {
		struct virtio_pcidev_msg hdr;
		u8 data;
	} msg = {
		.hdr = {
			.op = VIRTIO_PCIDEV_OP_MMIO_MEMSET,
			.bar = bar,
			.size = size,
			.addr = offset,
		},
		.data = value,
	};

	virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
}

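/* Access hooks consumed by the generic UML virtual PCI layer (virt-pci.c). */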
static const struct um_pci_ops virtio_pcidev_um_pci_ops = {
	.cfgspace_read = virtio_pcidev_cfgspace_read,
	.cfgspace_write = virtio_pcidev_cfgspace_write,
	.bar_read = virtio_pcidev_bar_read,
	.bar_write = virtio_pcidev_bar_write,
	.bar_copy_from = virtio_pcidev_bar_copy_from,
	.bar_copy_to = virtio_pcidev_bar_copy_to,
	.bar_set = virtio_pcidev_bar_set,
};

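/*
 * Interrupt handling: NUM_IRQ_MSGS receive buffers of MAX_IRQ_MSG_SIZE are
 * kept posted on the "irq" virtqueue; the device fills one in to signal
 * INTx, MSI/MSI-X or PME.  Each buffer is recycled after it is handled.
 */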
static void virtio_pcidev_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
{
	struct scatterlist sg[1];

	sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
	if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
		kfree(buf);
	else if (kick)
		virtqueue_kick(vq);
}

static void virtio_pcidev_handle_irq_message(struct virtqueue *vq,
					     struct virtio_pcidev_msg *msg)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pcidev_device *dev = vdev->priv;

	if (!dev->pdev.irq)
		return;

	/* we should properly chain interrupts, but on ARCH=um we don't care */

	switch (msg->op) {
	case VIRTIO_PCIDEV_OP_INT:
		generic_handle_irq(dev->pdev.irq);
		break;
	case VIRTIO_PCIDEV_OP_MSI:
		/* our MSI message is just the interrupt number */
		if (msg->size == sizeof(u32))
			generic_handle_irq(le32_to_cpup((void *)msg->data));
		else
			generic_handle_irq(le16_to_cpup((void *)msg->data));
		break;
	case VIRTIO_PCIDEV_OP_PME:
		/* nothing to do - we already woke up due to the message */
		break;
	default:
		dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
		break;
	}
}

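/*
 * Completion callback for the command queue.  While a caller is busy-waiting
 * in virtio_pcidev_send_cmd() (STAT_WAITING set), completions are left on
 * the queue so that the waiter can reap them itself.
 */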
static void virtio_pcidev_cmd_vq_cb(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pcidev_device *dev = vdev->priv;
	void *cmd;
	int len;

	if (test_bit(VIRTIO_PCIDEV_STAT_WAITING, &dev->status))
		return;

	while ((cmd = virtqueue_get_buf(vq, &len)))
		virtio_pcidev_free_buf(dev, cmd);
}

static void virtio_pcidev_irq_vq_cb(struct virtqueue *vq)
{
	struct virtio_pcidev_msg *msg;
	int len;

	while ((msg = virtqueue_get_buf(vq, &len))) {
		if (len >= sizeof(*msg))
			virtio_pcidev_handle_irq_message(vq, msg);

		/* recycle the message buffer */
		virtio_pcidev_irq_vq_addbuf(vq, msg, true);
	}
}

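/*
 * Set up the "cmd" and "irq" virtqueues and pre-post the IRQ receive
 * buffers.  Called from both the PCI and the platform-device probe paths.
 */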
static int virtio_pcidev_init_vqs(struct virtio_pcidev_device *dev)
{
	struct virtqueue_info vqs_info[] = {
		{ "cmd", virtio_pcidev_cmd_vq_cb },
		{ "irq", virtio_pcidev_irq_vq_cb },
	};
	struct virtqueue *vqs[2];
	int err, i;

	err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
	if (err)
		return err;

	dev->cmd_vq = vqs[0];
	dev->irq_vq = vqs[1];

	virtio_device_ready(dev->vdev);

	for (i = 0; i < NUM_IRQ_MSGS; i++) {
		void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);

		if (msg)
			virtio_pcidev_irq_vq_addbuf(dev->irq_vq, msg, false);
	}

	virtqueue_kick(dev->irq_vq);

	return 0;
}

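/*
 * Platform mode: if the device tree node is compatible with "simple-bus",
 * the device is registered as a platform device bridge instead of a PCI
 * device, and its children are populated from the device tree.
 */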
static void __virtio_pcidev_virtio_platform_remove(struct virtio_device *vdev,
						   struct virtio_pcidev_device *dev)
{
	um_pci_platform_device_unregister(&dev->pdev);

	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	kfree(dev);
}

static int virtio_pcidev_virtio_platform_probe(struct virtio_device *vdev,
					       struct virtio_pcidev_device *dev)
{
	int err;

	dev->platform = true;

	err = virtio_pcidev_init_vqs(dev);
	if (err)
		goto err_free;

	err = um_pci_platform_device_register(&dev->pdev);
	if (err)
		goto err_reset;

	err = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	um_pci_platform_device_unregister(&dev->pdev);
err_reset:
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
err_free:
	kfree(dev);
	return err;
}

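/*
 * Normal probe path: register a virtual PCI device, enable wakeup on the
 * virtio device so PME messages can wake the system, and keep the
 * virtqueues alive across suspend (see virtio_uml_set_no_vq_suspend()).
 */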
static int virtio_pcidev_virtio_probe(struct virtio_device *vdev)
{
	struct virtio_pcidev_device *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->vdev = vdev;
	vdev->priv = dev;

	dev->pdev.ops = &virtio_pcidev_um_pci_ops;

	if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
		return virtio_pcidev_virtio_platform_probe(vdev, dev);

	err = virtio_pcidev_init_vqs(dev);
	if (err)
		goto err_free;

	err = um_pci_device_register(&dev->pdev);
	if (err)
		goto err_reset;

	device_set_wakeup_enable(&vdev->dev, true);

	/*
	 * In order to do suspend-resume properly, don't allow VQs
	 * to be suspended.
	 */
	virtio_uml_set_no_vq_suspend(vdev, true);

	return 0;

err_reset:
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
err_free:
	kfree(dev);
	return err;
}

static void virtio_pcidev_virtio_remove(struct virtio_device *vdev)
{
	struct virtio_pcidev_device *dev = vdev->priv;

	if (dev->platform) {
		of_platform_depopulate(&vdev->dev);
		__virtio_pcidev_virtio_platform_remove(vdev, dev);
		return;
	}

	device_set_wakeup_enable(&vdev->dev, false);

	um_pci_device_unregister(&dev->pdev);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	dev->cmd_vq = NULL;
	dev->irq_vq = NULL;
	vdev->config->del_vqs(vdev);

	kfree(dev);
}

static void virtio_pcidev_virtio_shutdown(struct virtio_device *vdev)
{
	/* nothing to do, we just don't want queue shutdown */
}

static struct virtio_device_id id_table[] = {
	{ CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_pcidev_virtio_driver = {
	.driver.name = "virtio-pci",
	.id_table = id_table,
	.probe = virtio_pcidev_virtio_probe,
	.remove = virtio_pcidev_virtio_remove,
	.shutdown = virtio_pcidev_virtio_shutdown,
};

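/*
 * The driver only binds to the virtio device ID selected via
 * CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID; if none is configured, PCI support
 * is simply left out rather than failing registration.
 */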
static int __init virtio_pcidev_init(void)
{
	if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
		 "No virtio device ID configured for PCI - no PCI support\n"))
		return 0;

	return register_virtio_driver(&virtio_pcidev_virtio_driver);
}
late_initcall(virtio_pcidev_init);

static void __exit virtio_pcidev_exit(void)
{
	unregister_virtio_driver(&virtio_pcidev_virtio_driver);
}
module_exit(virtio_pcidev_exit);