GitHub Repository: torvalds/linux
Path: blob/master/samples/vfio-mdev/mdpy.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *       Author: Neo Jia <cjia@nvidia.com>
 *               Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME "mdpy"
#define MDPY_CLASS_NAME "mdpy"

#define MDPY_CONFIG_SPACE_SIZE 0xff
#define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE
#define MDPY_DISPLAY_REGION 16

#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)


MODULE_DESCRIPTION("Mediated virtual PCI display host device driver");
MODULE_LICENSE("GPL v2");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static struct mdpy_type {
	struct mdev_type type;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.type.sysfs_name = MDPY_TYPE_1,
		.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width = 640,
		.height = 480,
	}, {
		.type.sysfs_name = MDPY_TYPE_2,
		.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width = 1024,
		.height = 768,
	}, {
		.type.sysfs_name = MDPY_TYPE_3,
		.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width = 1920,
		.height = 1080,
	},
};

static struct mdev_type *mdpy_mdev_types[] = {
	&mdpy_types[0].type,
	&mdpy_types[1].type,
	&mdpy_types[2].type,
};

static dev_t mdpy_devt;
static const struct class mdpy_class = {
	.name = MDPY_CLASS_NAME,
};
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};
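/*
 * Fill in the virtual PCI config space: vendor/device IDs, command and
 * status registers, a 32-bit prefetchable memory BAR0 for the framebuffer,
 * and a vendor-specific capability carrying the display format, width and
 * height (offsets defined in mdpy-defs.h).
 */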
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}
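/*
 * Handle guest writes to the virtual PCI config space.  Only BAR0
 * programming is emulated: a write of 0xffffffff returns the size mask,
 * any other value becomes the new BAR0 base address.
 */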
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}
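/*
 * Dispatch a device access: offsets below MDPY_CONFIG_SPACE_SIZE go to the
 * virtual config space, offsets inside the memory BAR window go to the
 * framebuffer, anything else fails.
 */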
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk, buf, count);
		else
			memcpy(buf, mdev_state->memblk, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
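/* Device reset: repaint the framebuffer with a gray gradient. */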
static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}
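/*
 * One-time device setup: allocate the virtual config space and the
 * framebuffer (size rounded up to a power of two), then initialize the
 * config registers and the display contents.
 */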
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		container_of(mdev->type, struct mdpy_type, type);
	u32 fbsize;
	int ret = -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
		 type->width, type->height);
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}
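/*
 * read/write entry points: user buffers are transferred in naturally
 * aligned 4-, 2- or 1-byte chunks, each forwarded through mdev_access().
 */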
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
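/* Map the vmalloc'ed framebuffer; only a shared mapping of the memory BAR is allowed. */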
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

static int mdpy_ioctl_get_region_info(struct vfio_device *vdev,
				      struct vfio_region_info *region_info,
				      struct vfio_info_cap *caps)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
				      VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size = mdev_state->memsize;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
				      VFIO_REGION_INFO_FLAG_WRITE |
				      VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size = 0;
		region_info->offset = 0;
		region_info->flags = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}
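/*
 * Describe the framebuffer as a plane for VFIO_DEVICE_QUERY_GFX_PLANE;
 * only region-type planes are supported.
 */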
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format = mdev_state->type->format;
	plane->width = mdev_state->type->width;
	plane->height = mdev_state->type->height;
	plane->stride = (mdev_state->type->width *
			 mdev_state->type->bytepp);
	plane->size = mdev_state->memsize;
	plane->region_index = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos = 0;
	plane->y_pos = 0;
	plane->x_hot = 0;
	plane->y_hot = 0;

	return 0;
}
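/* VFIO device ioctls: device info, irq info, gfx plane query and reset. */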
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane = {};

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
	struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}

static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.get_region_info_caps = mdpy_ioctl_get_region_info,
	.mmap = mdpy_mmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};

static struct mdev_driver mdpy_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.max_instances = 4,
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove = mdpy_remove,
	.show_description = mdpy_show_description,
};

static const struct file_operations vd_fops = {
	.owner = THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}
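/*
 * Module init/exit: register a chrdev region and placeholder cdev, the mdev
 * driver, the "mdpy" class and parent device, then the supported mdev types
 * with the mdev core.
 */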
static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	ret = class_register(&mdpy_class);
	if (ret)
		goto err_driver;
	mdpy_dev.class = &mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_put;

	ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
				   mdpy_mdev_types,
				   ARRAY_SIZE(mdpy_mdev_types));
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_del(&mdpy_dev);
err_put:
	put_device(&mdpy_dev);
	class_unregister(&mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_parent(&mdpy_parent);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_unregister(&mdpy_class);
}

module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)