// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 * (c) Gerd Hoffmann <[email protected]>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *       Author: Neo Jia <[email protected]>
 *               Kirti Wankhede <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME               "mdpy"
#define MDPY_CLASS_NAME         "mdpy"

#define MDPY_CONFIG_SPACE_SIZE  0xff
#define MDPY_MEMORY_BAR_OFFSET  PAGE_SIZE
#define MDPY_DISPLAY_REGION     16

#define STORE_LE16(addr, val)   (*(u16 *)addr = val)
#define STORE_LE32(addr, val)   (*(u32 *)addr = val)

MODULE_DESCRIPTION("Mediated virtual PCI display host device driver");
MODULE_LICENSE("GPL v2");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static struct mdpy_type {
        struct mdev_type type;
        u32 format;
        u32 bytepp;
        u32 width;
        u32 height;
} mdpy_types[] = {
        {
                .type.sysfs_name = MDPY_TYPE_1,
                .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
                .format = DRM_FORMAT_XRGB8888,
                .bytepp = 4,
                .width = 640,
                .height = 480,
        }, {
                .type.sysfs_name = MDPY_TYPE_2,
                .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
                .format = DRM_FORMAT_XRGB8888,
                .bytepp = 4,
                .width = 1024,
                .height = 768,
        }, {
                .type.sysfs_name = MDPY_TYPE_3,
                .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
                .format = DRM_FORMAT_XRGB8888,
                .bytepp = 4,
                .width = 1920,
                .height = 1080,
        },
};

static struct mdev_type *mdpy_mdev_types[] = {
        &mdpy_types[0].type,
        &mdpy_types[1].type,
        &mdpy_types[2].type,
};
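
/*
 * Instances are created at runtime through the mdev sysfs interface.
 * A hypothetical example (exact paths depend on the running kernel and
 * are shown for illustration only):
 *
 *   UUID=$(uuidgen)
 *   echo "$UUID" > /sys/devices/mdpy/mdev_supported_types/mdpy-vga/create
 *
 * The resulting mediated device can then be assigned to a guest via VFIO.
 */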

static dev_t mdpy_devt;
static const struct class mdpy_class = {
        .name = MDPY_CLASS_NAME,
};
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
        struct vfio_device vdev;
        u8 *vconfig;
        u32 bar_mask;
        struct mutex ops_lock;
        struct mdev_device *mdev;
        struct vfio_device_info dev_info;

        const struct mdpy_type *type;
        u32 memsize;
        void *memblk;
};

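/*
 * Build the virtual PCI config space: vendor/device IDs, one 32-bit
 * prefetchable memory BAR backing the framebuffer, and a vendor-specific
 * capability that advertises the framebuffer format, width and height
 * (offsets defined in mdpy-defs.h).
 */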
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
                   MDPY_PCI_VENDOR_ID);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
                   MDPY_PCI_DEVICE_ID);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
                   MDPY_PCI_SUBVENDOR_ID);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
                   MDPY_PCI_SUBDEVICE_ID);

        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
                   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
                   PCI_STATUS_CAP_LIST);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
                   PCI_CLASS_DISPLAY_OTHER);
        mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

        STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
                   PCI_BASE_ADDRESS_SPACE_MEMORY |
                   PCI_BASE_ADDRESS_MEM_TYPE_32 |
                   PCI_BASE_ADDRESS_MEM_PREFETCH);
        mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

        /* vendor specific capability for the config registers */
        mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
        mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
        mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
        mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
        STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
                   mdev_state->type->format);
        STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
                   mdev_state->type->width);
        STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
                   mdev_state->type->height);
}

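/*
 * Emulate config space writes.  Only BAR0 needs handling: writing
 * 0xffffffff implements the standard BAR sizing protocol (the value is
 * masked with bar_mask so the guest can read back the region size);
 * any other value simply latches the programmed base address.
 */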
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
                                 char *buf, u32 count)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        u32 cfg_addr;

        switch (offset) {
        case PCI_BASE_ADDRESS_0:
                cfg_addr = *(u32 *)buf;

                if (cfg_addr == 0xffffffff) {
                        cfg_addr = (cfg_addr & mdev_state->bar_mask);
                } else {
                        cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
                        if (cfg_addr)
                                dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
                }

                cfg_addr |= (mdev_state->vconfig[offset] &
                             ~PCI_BASE_ADDRESS_MEM_MASK);
                STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
                break;
        }
}

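/*
 * Common backend for mdpy_read() and mdpy_write(): offsets below
 * MDPY_CONFIG_SPACE_SIZE are routed to the virtual config space,
 * offsets within [MDPY_MEMORY_BAR_OFFSET, MDPY_MEMORY_BAR_OFFSET +
 * memsize) go to the framebuffer, anything else is rejected.
 */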
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
                           size_t count, loff_t pos, bool is_write)
{
        int ret = 0;

        mutex_lock(&mdev_state->ops_lock);

        if (pos < MDPY_CONFIG_SPACE_SIZE) {
                if (is_write)
                        handle_pci_cfg_write(mdev_state, pos, buf, count);
                else
                        memcpy(buf, (mdev_state->vconfig + pos), count);

        } else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
                   (pos + count <=
                    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
                pos -= MDPY_MEMORY_BAR_OFFSET;
                if (is_write)
                        memcpy(mdev_state->memblk, buf, count);
                else
                        memcpy(buf, mdev_state->memblk, count);

        } else {
                dev_info(mdev_state->vdev.dev,
                         "%s: %s @0x%llx (unhandled)\n", __func__,
                         is_write ? "WR" : "RD", pos);
                ret = -1;
                goto accessfailed;
        }

        ret = count;

accessfailed:
        mutex_unlock(&mdev_state->ops_lock);

        return ret;
}

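/*
 * Reset just repaints the framebuffer: each scanline gets a gray value
 * proportional to its row, producing a vertical gradient that is easy
 * to recognize on the guest side.
 */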
static int mdpy_reset(struct mdev_state *mdev_state)
{
        u32 stride, i;

        /* initialize with gray gradient */
        stride = mdev_state->type->width * mdev_state->type->bytepp;
        for (i = 0; i < mdev_state->type->height; i++)
                memset(mdev_state->memblk + i * stride,
                       i * 255 / mdev_state->type->height,
                       stride);
        return 0;
}

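/*
 * Per-device setup, invoked through the vfio_device_ops .init hook when
 * mdpy_probe() calls vfio_alloc_device(): allocate the virtual config
 * space and the framebuffer.  The framebuffer size is rounded up to a
 * power of two so it can be reported through the BAR sizing protocol.
 */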
static int mdpy_init_dev(struct vfio_device *vdev)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        struct mdev_device *mdev = to_mdev_device(vdev->dev);
        const struct mdpy_type *type =
                container_of(mdev->type, struct mdpy_type, type);
        u32 fbsize;
        int ret = -ENOMEM;

        mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
        if (!mdev_state->vconfig)
                return ret;

        fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

        mdev_state->memblk = vmalloc_user(fbsize);
        if (!mdev_state->memblk)
                goto out_vconfig;

        mutex_init(&mdev_state->ops_lock);
        mdev_state->mdev = mdev;
        mdev_state->type = type;
        mdev_state->memsize = fbsize;
        mdpy_create_config_space(mdev_state);
        mdpy_reset(mdev_state);

        dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
                 type->width, type->height);
        return 0;

out_vconfig:
        kfree(mdev_state->vconfig);
        return ret;
}

static int mdpy_probe(struct mdev_device *mdev)
{
        struct mdev_state *mdev_state;
        int ret;

        mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
                                       &mdpy_dev_ops);
        if (IS_ERR(mdev_state))
                return PTR_ERR(mdev_state);

        ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
        if (ret)
                goto err_put_vdev;
        dev_set_drvdata(&mdev->dev, mdev_state);
        return 0;

err_put_vdev:
        vfio_put_device(&mdev_state->vdev);
        return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);

        vfree(mdev_state->memblk);
        kfree(mdev_state->vconfig);
}

static void mdpy_remove(struct mdev_device *mdev)
{
        struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

        dev_info(&mdev->dev, "%s\n", __func__);

        vfio_unregister_group_dev(&mdev_state->vdev);
        vfio_put_device(&mdev_state->vdev);
}

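/*
 * read()/write() on the vfio device fd: split the transfer into the
 * largest naturally aligned chunks (4, 2 or 1 bytes) and forward each
 * chunk to mdev_access().
 */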
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
                         size_t count, loff_t *ppos)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 2;
                } else {
                        u8 val;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

read_err:
        return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 1;
                }
                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;
write_err:
        return -EFAULT;
}

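/*
 * Let userspace map the framebuffer directly.  The BAR is backed by
 * vmalloc_user() memory, so remap_vmalloc_range() can expose the whole
 * page-aligned, power-of-two sized region as a shared mapping.
 */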
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);

        if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
                return -EINVAL;
        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if (vma->vm_end - vma->vm_start > mdev_state->memsize)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;

        return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

static int mdpy_get_region_info(struct mdev_state *mdev_state,
                                struct vfio_region_info *region_info,
                                u16 *cap_type_id, void **cap_type)
{
        if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
            region_info->index != MDPY_DISPLAY_REGION)
                return -EINVAL;

        switch (region_info->index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                region_info->offset = 0;
                region_info->size = MDPY_CONFIG_SPACE_SIZE;
                region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
                                      VFIO_REGION_INFO_FLAG_WRITE);
                break;
        case VFIO_PCI_BAR0_REGION_INDEX:
        case MDPY_DISPLAY_REGION:
                region_info->offset = MDPY_MEMORY_BAR_OFFSET;
                region_info->size = mdev_state->memsize;
                region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
                                      VFIO_REGION_INFO_FLAG_WRITE |
                                      VFIO_REGION_INFO_FLAG_MMAP);
                break;
        default:
                region_info->size = 0;
                region_info->offset = 0;
                region_info->flags = 0;
        }

        return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
        irq_info->count = 0;
        return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
        dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
        dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
        dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
        return 0;
}

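/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: the display is exposed as a region-type
 * plane (VFIO_GFX_PLANE_TYPE_REGION) rather than a dma-buf, so a user
 * such as QEMU reads the pixels from the MDPY_DISPLAY_REGION region.
 */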
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
                                struct vfio_device_gfx_plane_info *plane)
{
        if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
                if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
                                     VFIO_GFX_PLANE_TYPE_REGION))
                        return 0;
                return -EINVAL;
        }

        if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
                return -EINVAL;

        plane->drm_format = mdev_state->type->format;
        plane->width = mdev_state->type->width;
        plane->height = mdev_state->type->height;
        plane->stride = (mdev_state->type->width *
                         mdev_state->type->bytepp);
        plane->size = mdev_state->memsize;
        plane->region_index = MDPY_DISPLAY_REGION;

        /* unused */
        plane->drm_format_mod = 0;
        plane->x_pos = 0;
        plane->y_pos = 0;
        plane->x_hot = 0;
        plane->y_hot = 0;

        return 0;
}

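/*
 * VFIO device ioctls: GET_INFO, GET_REGION_INFO, GET_IRQ_INFO,
 * QUERY_GFX_PLANE and RESET are handled here; the device has no
 * interrupts, so SET_IRQS is rejected with -EINVAL.
 */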
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
                       unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mdpy_get_device_info(&info);
                if (ret)
                        return ret;

                memcpy(&mdev_state->dev_info, &info, sizeof(info));

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;
                u16 cap_type_id = 0;
                void *cap_type = NULL;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
                                           &cap_type);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if ((info.argsz < minsz) ||
                    (info.index >= mdev_state->dev_info.num_irqs))
                        return -EINVAL;

                ret = mdpy_get_irq_info(&info);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_QUERY_GFX_PLANE:
        {
                struct vfio_device_gfx_plane_info plane = {};

                minsz = offsetofend(struct vfio_device_gfx_plane_info,
                                    region_index);

                if (copy_from_user(&plane, (void __user *)arg, minsz))
                        return -EFAULT;

                if (plane.argsz < minsz)
                        return -EINVAL;

                ret = mdpy_query_gfx_plane(mdev_state, &plane);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &plane, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_SET_IRQS:
                return -EINVAL;

        case VFIO_DEVICE_RESET:
                return mdpy_reset(mdev_state);
        }
        return -ENOTTY;
}

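/*
 * Per-device sysfs attribute group, exported as "vendor/resolution"
 * under the mdev device directory; it reports the fixed mode of the
 * instance (e.g. "1920x1080").
 */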
static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct mdev_state *mdev_state = dev_get_drvdata(dev);

        return sprintf(buf, "%dx%d\n",
                       mdev_state->type->width,
                       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
        &dev_attr_resolution.attr,
        NULL,
};

static const struct attribute_group mdev_dev_group = {
        .name = "vendor",
        .attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
        &mdev_dev_group,
        NULL,
};

static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
        struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);

        return sprintf(buf, "virtual display, %dx%d framebuffer\n",
                       type->width, type->height);
}

static const struct vfio_device_ops mdpy_dev_ops = {
        .init = mdpy_init_dev,
        .release = mdpy_release_dev,
        .read = mdpy_read,
        .write = mdpy_write,
        .ioctl = mdpy_ioctl,
        .mmap = mdpy_mmap,
        .bind_iommufd = vfio_iommufd_emulated_bind,
        .unbind_iommufd = vfio_iommufd_emulated_unbind,
        .attach_ioas = vfio_iommufd_emulated_attach_ioas,
        .detach_ioas = vfio_iommufd_emulated_detach_ioas,
};

static struct mdev_driver mdpy_driver = {
        .device_api = VFIO_DEVICE_API_PCI_STRING,
        .max_instances = 4,
        .driver = {
                .name = "mdpy",
                .owner = THIS_MODULE,
                .mod_name = KBUILD_MODNAME,
                .dev_groups = mdev_dev_groups,
        },
        .probe = mdpy_probe,
        .remove = mdpy_remove,
        .show_description = mdpy_show_description,
};

static const struct file_operations vd_fops = {
        .owner = THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
        /* nothing */
}

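/*
 * Module init: set up the mdpy char dev region and cdev (vd_fops is an
 * empty placeholder), register the mdev driver, the class and the
 * parent device, and finally publish the supported types via
 * mdev_register_parent().
 */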
static int __init mdpy_dev_init(void)
{
        int ret = 0;

        ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
        if (ret < 0) {
                pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
                return ret;
        }
        cdev_init(&mdpy_cdev, &vd_fops);
        cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
        pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

        ret = mdev_register_driver(&mdpy_driver);
        if (ret)
                goto err_cdev;

        ret = class_register(&mdpy_class);
        if (ret)
                goto err_driver;
        mdpy_dev.class = &mdpy_class;
        mdpy_dev.release = mdpy_device_release;
        dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

        ret = device_register(&mdpy_dev);
        if (ret)
                goto err_put;

        ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
                                   mdpy_mdev_types,
                                   ARRAY_SIZE(mdpy_mdev_types));
        if (ret)
                goto err_device;

        return 0;

err_device:
        device_del(&mdpy_dev);
err_put:
        put_device(&mdpy_dev);
        class_unregister(&mdpy_class);
err_driver:
        mdev_unregister_driver(&mdpy_driver);
err_cdev:
        cdev_del(&mdpy_cdev);
        unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
        return ret;
}

static void __exit mdpy_dev_exit(void)
{
        mdpy_dev.bus = NULL;
        mdev_unregister_parent(&mdpy_parent);

        device_unregister(&mdpy_dev);
        mdev_unregister_driver(&mdpy_driver);
        cdev_del(&mdpy_cdev);
        unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
        class_unregister(&mdpy_class);
}

module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)