GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/macintosh/adb.c
/*
 * Device driver for the Apple Desktop Bus
 * and the /dev/adb device on Macintoshes.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Modified to declare controllers as structures, added
 * client notification of bus reset and handling of PowerBook
 * sleep, by Benjamin Herrenschmidt.
 *
 * To do:
 *
 * - /sys/bus/adb to list the devices and info
 * - more /dev/adb to allow userland to receive the
 *   flow of auto-polling data from a given device.
 * - move bus probe to a kernel thread
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#endif


EXPORT_SYMBOL(adb_client_list);

extern struct adb_driver via_macii_driver;
extern struct adb_driver via_maciisi_driver;
extern struct adb_driver via_cuda_driver;
extern struct adb_driver adb_iop_driver;
extern struct adb_driver via_pmu_driver;
extern struct adb_driver macio_adb_driver;

static DEFINE_MUTEX(adb_mutex);
static struct adb_driver *adb_driver_list[] = {
#ifdef CONFIG_ADB_MACII
        &via_macii_driver,
#endif
#ifdef CONFIG_ADB_MACIISI
        &via_maciisi_driver,
#endif
#ifdef CONFIG_ADB_CUDA
        &via_cuda_driver,
#endif
#ifdef CONFIG_ADB_IOP
        &adb_iop_driver,
#endif
#if defined(CONFIG_ADB_PMU) || defined(CONFIG_ADB_PMU68K)
        &via_pmu_driver,
#endif
#ifdef CONFIG_ADB_MACIO
        &macio_adb_driver,
#endif
        NULL
};
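
/*
 * The first driver in this list whose probe() call succeeds is selected
 * as the active ADB controller by adb_init() below, so the ordering here
 * effectively sets the probe priority.
 */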

static struct class *adb_dev_class;

static struct adb_driver *adb_controller;
BLOCKING_NOTIFIER_HEAD(adb_client_list);
static int adb_got_sleep;
static int adb_inited;
static DEFINE_SEMAPHORE(adb_probe_mutex);
static int sleepy_trackpad;
static int autopoll_devs;
int __adb_probe_sync;

static int adb_scan_bus(void);
static int do_adb_reset_bus(void);
static void adbdev_init(void);
static int try_handler_change(int, int);

static struct adb_handler {
        void (*handler)(unsigned char *, int, int);
        int original_address;
        int handler_id;
        int busy;
} adb_handler[16];

/*
 * The adb_handler_mutex protects all accesses to the original_address
 * and handler_id fields of adb_handler[i] for all i, and changes to the
 * handler field.
 * Accesses to the handler field are protected by the adb_handler_lock
 * rwlock. It is held across all calls to any handler, so that by the
 * time adb_unregister returns, we know that the old handler isn't being
 * called.
 */
static DEFINE_MUTEX(adb_handler_mutex);
static DEFINE_RWLOCK(adb_handler_lock);

#if 0
static void printADBreply(struct adb_request *req)
{
        int i;

        printk("adb reply (%d)", req->reply_len);
        for (i = 0; i < req->reply_len; i++)
                printk(" %x", req->reply[i]);
        printk("\n");

}
#endif

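/*
 * Scan the ADB bus and resolve address conflicts.  Every address from 1 to
 * 15 is probed with a "talk register 3" command; addresses that answer are
 * recorded in adb_handler[].  Devices sharing an address are then separated
 * by moving the non-colliding device(s) to a free address, following the
 * procedure from Apple's HW TechNote 01 referenced below.  Returns a bitmask
 * of addresses holding a device, later passed to the controller's
 * autopoll() hook.
 */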
static int adb_scan_bus(void)
{
        int i, highFree = 0, noMovement;
        int devmask = 0;
        struct adb_request req;

        /* assumes adb_handler[] is all zeroes at this point */
        for (i = 1; i < 16; i++) {
                /* see if there is anything at address i */
                adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                            (i << 4) | 0xf);
                if (req.reply_len > 1)
                        /* one or more devices at this address */
                        adb_handler[i].original_address = i;
                else if (i > highFree)
                        highFree = i;
        }

        /* Note we reset noMovement to 0 each time we move a device */
        for (noMovement = 1; noMovement < 2 && highFree > 0; noMovement++) {
                for (i = 1; i < 16; i++) {
                        if (adb_handler[i].original_address == 0)
                                continue;
                        /*
                         * Send a "talk register 3" command to address i
                         * to provoke a collision if there is more than
                         * one device at this address.
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                                    (i << 4) | 0xf);
                        /*
                         * Move the device(s) which didn't detect a
                         * collision to address `highFree'. Hopefully
                         * this only moves one device.
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC, 3,
                                    (i << 4) | 0xb, (highFree | 0x60), 0xfe);
                        /*
                         * See if anybody actually moved. This is suggested
                         * by HW TechNote 01:
                         *
                         * http://developer.apple.com/technotes/hw/hw_01.html
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                                    (highFree << 4) | 0xf);
                        if (req.reply_len <= 1)
                                continue;
                        /*
                         * Test whether there are any device(s) left
                         * at address i.
                         */
                        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                                    (i << 4) | 0xf);
                        if (req.reply_len > 1) {
                                /*
                                 * There are still one or more devices
                                 * left at address i. Register the one(s)
                                 * we moved to `highFree', and find a new
                                 * value for highFree.
                                 */
                                adb_handler[highFree].original_address =
                                        adb_handler[i].original_address;
                                while (highFree > 0 &&
                                       adb_handler[highFree].original_address)
                                        highFree--;
                                if (highFree <= 0)
                                        break;

                                noMovement = 0;
                        } else {
                                /*
                                 * No devices left at address i; move the
                                 * one(s) we moved to `highFree' back to i.
                                 */
                                adb_request(&req, NULL, ADBREQ_SYNC, 3,
                                            (highFree << 4) | 0xb,
                                            (i | 0x60), 0xfe);
                        }
                }
        }

        /* Now fill in the handler_id field of the adb_handler entries. */
        printk(KERN_DEBUG "adb devices:");
        for (i = 1; i < 16; i++) {
                if (adb_handler[i].original_address == 0)
                        continue;
                adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                            (i << 4) | 0xf);
                adb_handler[i].handler_id = req.reply[2];
                printk(" [%d]: %d %x", i, adb_handler[i].original_address,
                       adb_handler[i].handler_id);
                devmask |= 1 << i;
        }
        printk("\n");
        return devmask;
}

/*
 * This kernel task handles ADB probing. It dies once probing is
 * completed.
 */
static int
adb_probe_task(void *x)
{
        printk(KERN_INFO "adb: starting probe task...\n");
        do_adb_reset_bus();
        printk(KERN_INFO "adb: finished probe task...\n");

        up(&adb_probe_mutex);

        return 0;
}

static void
__adb_probe_task(struct work_struct *bullshit)
{
        kthread_run(adb_probe_task, NULL, "kadbprobe");
}

static DECLARE_WORK(adb_reset_work, __adb_probe_task);

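/*
 * Trigger a bus reset and re-probe.  When __adb_probe_sync is set, the
 * reset runs synchronously in the caller; otherwise adb_probe_mutex is
 * taken and the work is deferred to the "kadbprobe" kernel thread spawned
 * above, which releases the mutex once probing completes.
 */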
int
adb_reset_bus(void)
{
        if (__adb_probe_sync) {
                do_adb_reset_bus();
                return 0;
        }

        down(&adb_probe_mutex);
        schedule_work(&adb_reset_work);
        return 0;
}

#ifdef CONFIG_PM
/*
 * notify clients before sleep
 */
static int adb_suspend(struct platform_device *dev, pm_message_t state)
{
        adb_got_sleep = 1;
        /* We need to get a lock on the probe thread */
        down(&adb_probe_mutex);
        /* Stop autopoll */
        if (adb_controller->autopoll)
                adb_controller->autopoll(0);
        blocking_notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);

        return 0;
}

/*
 * reset bus after sleep
 */
static int adb_resume(struct platform_device *dev)
{
        adb_got_sleep = 0;
        up(&adb_probe_mutex);
        adb_reset_bus();

        return 0;
}
#endif /* CONFIG_PM */

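/*
 * Driver init: bail out on machines without ADB, pick the first controller
 * driver whose probe() succeeds, run its init() hook (dropping the
 * controller if that fails), flag PowerBooks whose trackpads need extra
 * settling delays, then register /dev/adb and kick off the first bus
 * reset/probe.
 */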
static int __init adb_init(void)
{
        struct adb_driver *driver;
        int i;

#ifdef CONFIG_PPC32
        if (!machine_is(chrp) && !machine_is(powermac))
                return 0;
#endif
#ifdef CONFIG_MAC
        if (!MACH_IS_MAC)
                return 0;
#endif

        /* xmon may do early-init */
        if (adb_inited)
                return 0;
        adb_inited = 1;

        adb_controller = NULL;

        i = 0;
        while ((driver = adb_driver_list[i++]) != NULL) {
                if (!driver->probe()) {
                        adb_controller = driver;
                        break;
                }
        }
        if (adb_controller != NULL && adb_controller->init &&
            adb_controller->init())
                adb_controller = NULL;
        if (adb_controller == NULL) {
                printk(KERN_WARNING "Warning: no ADB interface detected\n");
        } else {
#ifdef CONFIG_PPC
                if (of_machine_is_compatible("AAPL,PowerBook1998") ||
                    of_machine_is_compatible("PowerBook1,1"))
                        sleepy_trackpad = 1;
#endif /* CONFIG_PPC */

                adbdev_init();
                adb_reset_bus();
        }
        return 0;
}

device_initcall(adb_init);

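/*
 * The actual reset sequence: stop autopolling, tell clients a reset is
 * coming (ADB_MSG_PRE_RESET), clear the handler table, ask the controller
 * to reset the bus, rescan it, re-enable autopoll for the devices found,
 * and finally notify clients with ADB_MSG_POST_RESET.  The extra msleep()
 * calls work around trackpads on some PowerBooks (see sleepy_trackpad).
 */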
static int
do_adb_reset_bus(void)
{
        int ret;

        if (adb_controller == NULL)
                return -ENXIO;

        if (adb_controller->autopoll)
                adb_controller->autopoll(0);

        blocking_notifier_call_chain(&adb_client_list,
                                     ADB_MSG_PRE_RESET, NULL);

        if (sleepy_trackpad) {
                /* Let the trackpad settle down */
                msleep(500);
        }

        mutex_lock(&adb_handler_mutex);
        write_lock_irq(&adb_handler_lock);
        memset(adb_handler, 0, sizeof(adb_handler));
        write_unlock_irq(&adb_handler_lock);

        /* That one is still a bit synchronous, oh well... */
        if (adb_controller->reset_bus)
                ret = adb_controller->reset_bus();
        else
                ret = 0;

        if (sleepy_trackpad) {
                /* Let the trackpad settle down */
                msleep(1500);
        }

        if (!ret) {
                autopoll_devs = adb_scan_bus();
                if (adb_controller->autopoll)
                        adb_controller->autopoll(autopoll_devs);
        }
        mutex_unlock(&adb_handler_mutex);

        blocking_notifier_call_chain(&adb_client_list,
                                     ADB_MSG_POST_RESET, NULL);

        return ret;
}

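/*
 * Ask the current controller to poll for pending ADB data, if it provides
 * a poll() hook.  No-op when no controller is active.
 */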
void
adb_poll(void)
{
        if ((adb_controller == NULL) || (adb_controller->poll == NULL))
                return;
        adb_controller->poll();
}

static void adb_sync_req_done(struct adb_request *req)
{
        struct completion *comp = req->arg;

        complete(comp);
}

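/*
 * Build and send an ADB request.  The variadic bytes become the packet
 * payload after an ADB_PACKET header byte.  ADBREQ_REPLY asks for a reply,
 * ADBREQ_NOSEND only fills in the request without sending it, and
 * ADBREQ_SYNC makes the call block on an on-stack completion until the
 * request finishes (and must not be combined with a done() callback).
 */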
int
adb_request(struct adb_request *req, void (*done)(struct adb_request *),
            int flags, int nbytes, ...)
{
        va_list list;
        int i;
        int rc;
        struct completion comp;

        if ((adb_controller == NULL) || (adb_controller->send_request == NULL))
                return -ENXIO;
        if (nbytes < 1)
                return -EINVAL;

        req->nbytes = nbytes + 1;
        req->done = done;
        req->reply_expected = flags & ADBREQ_REPLY;
        req->data[0] = ADB_PACKET;
        va_start(list, nbytes);
        for (i = 0; i < nbytes; ++i)
                req->data[i + 1] = va_arg(list, int);
        va_end(list);

        if (flags & ADBREQ_NOSEND)
                return 0;

        /* Synchronous requests block using an on-stack completion */
        if (flags & ADBREQ_SYNC) {
                WARN_ON(done);
                req->done = adb_sync_req_done;
                req->arg = &comp;
                init_completion(&comp);
        }

        rc = adb_controller->send_request(req, 0);

        if ((flags & ADBREQ_SYNC) && !rc && !req->complete)
                wait_for_completion(&comp);

        return rc;
}

/* Ultimately this should return the number of devices with
   the given default id. And it does that now. Note: changed
   behaviour: this function will now register if default_id
   _and_ handler_id both match, but handler_id can be left as 0
   to match on default_id only. When handler_id is set, this
   function will try to adjust the handler_id if it doesn't
   match. */
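/*
 * Usage sketch (illustrative only, not taken from this file): a client
 * driver would typically pass one of the default device ids from
 * <linux/adb.h> together with its input callback, e.g.
 *
 *	static struct adb_ids kbd_ids;			// hypothetical
 *	static void kbd_input(unsigned char *buf,	// hypothetical handler
 *			      int nb, int autopoll);
 *
 *	adb_register(ADB_KEYBOARD, 0, &kbd_ids, kbd_input);
 *
 * and later call adb_unregister() for each id recorded in kbd_ids on
 * teardown.
 */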
int
adb_register(int default_id, int handler_id, struct adb_ids *ids,
             void (*handler)(unsigned char *, int, int))
{
        int i;

        mutex_lock(&adb_handler_mutex);
        ids->nids = 0;
        for (i = 1; i < 16; i++) {
                if ((adb_handler[i].original_address == default_id) &&
                    (!handler_id || (handler_id == adb_handler[i].handler_id) ||
                    try_handler_change(i, handler_id))) {
                        if (adb_handler[i].handler != 0) {
                                printk(KERN_ERR
                                       "Two handlers for ADB device %d\n",
                                       default_id);
                                continue;
                        }
                        write_lock_irq(&adb_handler_lock);
                        adb_handler[i].handler = handler;
                        write_unlock_irq(&adb_handler_lock);
                        ids->id[ids->nids++] = i;
                }
        }
        mutex_unlock(&adb_handler_mutex);
        return ids->nids;
}

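/*
 * Remove the handler installed at adb_handler[index].  If the handler is
 * currently running (busy flag set by adb_input() below), drop the lock
 * and yield until it finishes, so the old handler cannot be invoked once
 * this returns.
 */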
int
adb_unregister(int index)
{
        int ret = -ENODEV;

        mutex_lock(&adb_handler_mutex);
        write_lock_irq(&adb_handler_lock);
        if (adb_handler[index].handler) {
                while (adb_handler[index].busy) {
                        write_unlock_irq(&adb_handler_lock);
                        yield();
                        write_lock_irq(&adb_handler_lock);
                }
                ret = 0;
                adb_handler[index].handler = NULL;
        }
        write_unlock_irq(&adb_handler_lock);
        mutex_unlock(&adb_handler_mutex);
        return ret;
}

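/*
 * Called by controller drivers to hand a received ADB packet (autopoll
 * data or a reply) to the handler registered for the device address in
 * buf[0].  Input is dropped while the machine is heading into sleep.  The
 * busy flag is set around the handler call so that adb_unregister() can
 * wait for it to finish.
 */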
void
adb_input(unsigned char *buf, int nb, int autopoll)
{
        int i, id;
        static int dump_adb_input = 0;
        unsigned long flags;

        void (*handler)(unsigned char *, int, int);

        /* We skip keystrokes and mouse moves when the sleep process
         * has been started. We stop autopoll, but this is an extra
         * safety measure.
         */
        if (adb_got_sleep)
                return;

        id = buf[0] >> 4;
        if (dump_adb_input) {
                printk(KERN_INFO "adb packet: ");
                for (i = 0; i < nb; ++i)
                        printk(" %x", buf[i]);
                printk(", id = %d\n", id);
        }
        write_lock_irqsave(&adb_handler_lock, flags);
        handler = adb_handler[id].handler;
        if (handler != NULL)
                adb_handler[id].busy = 1;
        write_unlock_irqrestore(&adb_handler_lock, flags);
        if (handler != NULL) {
                (*handler)(buf, nb, autopoll);
                wmb();
                adb_handler[id].busy = 0;
        }

}

/* Try to change handler to new_id. Will return 1 if successful. */
static int try_handler_change(int address, int new_id)
{
        struct adb_request req;

        if (adb_handler[address].handler_id == new_id)
                return 1;
        adb_request(&req, NULL, ADBREQ_SYNC, 3,
                    ADB_WRITEREG(address, 3), address | 0x20, new_id);
        adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
                    ADB_READREG(address, 3));
        if (req.reply_len < 2)
                return 0;
        if (req.reply[2] != new_id)
                return 0;
        adb_handler[address].handler_id = req.reply[2];

        return 1;
}

int
adb_try_handler_change(int address, int new_id)
{
        int ret;

        mutex_lock(&adb_handler_mutex);
        ret = try_handler_change(address, new_id);
        mutex_unlock(&adb_handler_mutex);
        return ret;
}

int
adb_get_infos(int address, int *original_address, int *handler_id)
{
        mutex_lock(&adb_handler_mutex);
        *original_address = adb_handler[address].original_address;
        *handler_id = adb_handler[address].handler_id;
        mutex_unlock(&adb_handler_mutex);

        return (*original_address != 0);
}


/*
 * /dev/adb device driver.
 */

#define ADB_MAJOR 56		/* major number for /dev/adb */

struct adbdev_state {
        spinlock_t lock;
        atomic_t n_pending;
        struct adb_request *completed;
        wait_queue_head_t wait_queue;
        int inuse;
};

static void adb_write_done(struct adb_request *req)
{
        struct adbdev_state *state = (struct adbdev_state *) req->arg;
        unsigned long flags;

        if (!req->complete) {
                req->reply_len = 0;
                req->complete = 1;
        }
        spin_lock_irqsave(&state->lock, flags);
        atomic_dec(&state->n_pending);
        if (!state->inuse) {
                kfree(req);
                if (atomic_read(&state->n_pending) == 0) {
                        spin_unlock_irqrestore(&state->lock, flags);
                        kfree(state);
                        return;
                }
        } else {
                struct adb_request **ap = &state->completed;
                while (*ap != NULL)
                        ap = &(*ap)->next;
                req->next = NULL;
                *ap = req;
                wake_up_interruptible(&state->wait_queue);
        }
        spin_unlock_irqrestore(&state->lock, flags);
}

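/*
 * Handle an ADB_QUERY request written to /dev/adb.  Currently only
 * ADB_QUERY_GETDEVINFO is implemented: it reports the original address
 * and handler id recorded for the device given in data[2], and completes
 * the request immediately via adb_write_done() so the answer can be read
 * back.
 */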
static int
do_adb_query(struct adb_request *req)
{
        int ret = -EINVAL;

        switch (req->data[1]) {
        case ADB_QUERY_GETDEVINFO:
                if (req->nbytes < 3)
                        break;
                mutex_lock(&adb_handler_mutex);
                req->reply[0] = adb_handler[req->data[2]].original_address;
                req->reply[1] = adb_handler[req->data[2]].handler_id;
                mutex_unlock(&adb_handler_mutex);
                req->complete = 1;
                req->reply_len = 2;
                adb_write_done(req);
                ret = 0;
                break;
        }
        return ret;
}

static int adb_open(struct inode *inode, struct file *file)
{
        struct adbdev_state *state;
        int ret = 0;

        mutex_lock(&adb_mutex);
        if (iminor(inode) > 0 || adb_controller == NULL) {
                ret = -ENXIO;
                goto out;
        }
        state = kmalloc(sizeof(struct adbdev_state), GFP_KERNEL);
        if (state == 0) {
                ret = -ENOMEM;
                goto out;
        }
        file->private_data = state;
        spin_lock_init(&state->lock);
        atomic_set(&state->n_pending, 0);
        state->completed = NULL;
        init_waitqueue_head(&state->wait_queue);
        state->inuse = 1;

out:
        mutex_unlock(&adb_mutex);
        return ret;
}

static int adb_release(struct inode *inode, struct file *file)
{
        struct adbdev_state *state = file->private_data;
        unsigned long flags;

        mutex_lock(&adb_mutex);
        if (state) {
                file->private_data = NULL;
                spin_lock_irqsave(&state->lock, flags);
                if (atomic_read(&state->n_pending) == 0
                    && state->completed == NULL) {
                        spin_unlock_irqrestore(&state->lock, flags);
                        kfree(state);
                } else {
                        state->inuse = 0;
                        spin_unlock_irqrestore(&state->lock, flags);
                }
        }
        mutex_unlock(&adb_mutex);
        return 0;
}

static ssize_t adb_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        int ret = 0;
        struct adbdev_state *state = file->private_data;
        struct adb_request *req;
        wait_queue_t wait = __WAITQUEUE_INITIALIZER(wait, current);
        unsigned long flags;

        if (count < 2)
                return -EINVAL;
        if (count > sizeof(req->reply))
                count = sizeof(req->reply);
        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        req = NULL;
        spin_lock_irqsave(&state->lock, flags);
        add_wait_queue(&state->wait_queue, &wait);
        current->state = TASK_INTERRUPTIBLE;

        for (;;) {
                req = state->completed;
                if (req != NULL)
                        state->completed = req->next;
                else if (atomic_read(&state->n_pending) == 0)
                        ret = -EIO;
                if (req != NULL || ret != 0)
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                spin_unlock_irqrestore(&state->lock, flags);
                schedule();
                spin_lock_irqsave(&state->lock, flags);
        }

        current->state = TASK_RUNNING;
        remove_wait_queue(&state->wait_queue, &wait);
        spin_unlock_irqrestore(&state->lock, flags);

        if (ret)
                return ret;

        ret = req->reply_len;
        if (ret > count)
                ret = count;
        if (ret > 0 && copy_to_user(buf, req->reply, ret))
                ret = -EFAULT;

        kfree(req);
        return ret;
}

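/*
 * Write path for /dev/adb.  The first byte selects the request type:
 * ADB_QUERY packets are answered locally by do_adb_query(), an ADB_PACKET
 * whose first payload byte is ADB_BUSRESET triggers do_adb_reset_bus(),
 * and any other ADB_PACKET is forwarded to the controller.  The write
 * blocks on adb_probe_mutex while a probe or sleep transition is in
 * progress.
 */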
static ssize_t adb_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        int ret/*, i*/;
        struct adbdev_state *state = file->private_data;
        struct adb_request *req;

        if (count < 2 || count > sizeof(req->data))
                return -EINVAL;
        if (adb_controller == NULL)
                return -ENXIO;
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;

        req = kmalloc(sizeof(struct adb_request),
                      GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;

        req->nbytes = count;
        req->done = adb_write_done;
        req->arg = (void *) state;
        req->complete = 0;

        ret = -EFAULT;
        if (copy_from_user(req->data, buf, count))
                goto out;

        atomic_inc(&state->n_pending);

        /* If a probe is in progress or we are sleeping, wait for it to complete */
        down(&adb_probe_mutex);

        /* Queries are special requests sent to the ADB driver itself */
        if (req->data[0] == ADB_QUERY) {
                if (count > 1)
                        ret = do_adb_query(req);
                else
                        ret = -EINVAL;
                up(&adb_probe_mutex);
        }
        /* Special case for ADB_BUSRESET request, all others are sent to
           the controller */
        else if ((req->data[0] == ADB_PACKET) && (count > 1)
                 && (req->data[1] == ADB_BUSRESET)) {
                ret = do_adb_reset_bus();
                up(&adb_probe_mutex);
                atomic_dec(&state->n_pending);
                if (ret == 0)
                        ret = count;
                goto out;
        } else {
                req->reply_expected = ((req->data[1] & 0xc) == 0xc);
                if (adb_controller && adb_controller->send_request)
                        ret = adb_controller->send_request(req, 0);
                else
                        ret = -ENXIO;
                up(&adb_probe_mutex);
        }

        if (ret != 0) {
                atomic_dec(&state->n_pending);
                goto out;
        }
        return count;

out:
        kfree(req);
        return ret;
}

static const struct file_operations adb_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = adb_read,
        .write = adb_write,
        .open = adb_open,
        .release = adb_release,
};

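/*
 * A dummy platform device/driver pair is registered by adbdev_init() below,
 * mainly so that the ADB core gets suspend/resume callbacks when CONFIG_PM
 * is enabled; adb_dummy_probe() only matches adb_pfdev itself.
 */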
static struct platform_driver adb_pfdrv = {
        .driver = {
                .name = "adb",
        },
#ifdef CONFIG_PM
        .suspend = adb_suspend,
        .resume = adb_resume,
#endif
};

static struct platform_device adb_pfdev = {
        .name = "adb",
};

static int __init
adb_dummy_probe(struct platform_device *dev)
{
        if (dev == &adb_pfdev)
                return 0;
        return -ENODEV;
}

static void __init
adbdev_init(void)
{
        if (register_chrdev(ADB_MAJOR, "adb", &adb_fops)) {
                printk(KERN_ERR "adb: unable to get major %d\n", ADB_MAJOR);
                return;
        }

        adb_dev_class = class_create(THIS_MODULE, "adb");
        if (IS_ERR(adb_dev_class))
                return;
        device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), NULL, "adb");

        platform_device_register(&adb_pfdev);
        platform_driver_probe(&adb_pfdrv, adb_dummy_probe);
}