GitHub Repository: torvalds/linux
Path: blob/master/drivers/firewire/core-cdev.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <[email protected]>
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>


#include "core.h"
#include <trace/events/firewire.h>

#include "packet-header-definitions.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION 5
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6

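/*
 * Per-open-file state: the event queue consumed by read(), the xarray of
 * resources allocated through ioctls, and at most one isochronous context
 * together with its mmap()'ed buffer.
 */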
struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct xarray resource_xa;
        struct list_head event_list;
        wait_queue_head_t wait;
        wait_queue_head_t tx_flush_wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;
        bool buffer_is_mapped;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};

static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}

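/*
 * Anything a client allocates via an ioctl and gives back either explicitly
 * or when the file is closed is wrapped in a struct client_resource.  The
 * release callback doubles as a type tag; see is_iso_resource() and
 * is_outbound_transaction_resource().
 */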
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};

struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};

struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};

struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        bool is_fcp;
        void *data;
        size_t length;
};

struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[];
};

struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        struct iso_resource_event *e_alloc, *e_dealloc;
};

static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource)
{
        return container_of(resource, struct address_handler_resource, resource);
}

static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource)
{
        return container_of(resource, struct inbound_transaction_resource, resource);
}

static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource)
{
        return container_of(resource, struct descriptor_resource, resource);
}

static struct iso_resource *to_iso_resource(struct client_resource *resource)
{
        return container_of(resource, struct iso_resource, resource);
}

static void release_iso_resource(struct client *, struct client_resource *);

static int is_iso_resource(const struct client_resource *resource)
{
        return resource->release == release_iso_resource;
}

static void release_transaction(struct client *client,
                                struct client_resource *resource);

static int is_outbound_transaction_resource(const struct client_resource *resource)
{
        return resource->release == release_transaction;
}

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!queue_delayed_work(fw_workqueue, &r->work, delay))
                client_put(r->client);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

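/*
 * Each of the following *_event types embeds struct event as its first
 * member (see the comment above) plus the fw_cdev_event_* payload that
 * dequeue_event() copies to user space.
 */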
struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        union {
                struct fw_cdev_event_response without_tstamp;
                struct fw_cdev_event_response2 with_tstamp;
        } rsp;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
                struct fw_cdev_event_request3 with_tstamp;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        union {
                struct fw_cdev_event_phy_packet without_tstamp;
                struct fw_cdev_event_phy_packet2 with_tstamp;
        } phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        union {
                struct fw_cdev_event_phy_packet without_tstamp;
                struct fw_cdev_event_phy_packet2 with_tstamp;
        } phy_packet;
};

#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
        if (in_compat_syscall())
                return compat_ptr(value);
        else
                return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
        if (in_compat_syscall())
                return ptr_to_compat(ptr);
        else
                return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
        return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        init_waitqueue_head(&client->tx_flush_wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        INIT_LIST_HEAD(&client->link);
        kref_init(&client->kref);

        file->private_data = client;

        return nonseekable_open(inode, file);
}
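/*
 * queue_event() and dequeue_event() implement the read() side of the
 * device: events are appended to client->event_list under client->lock and
 * copied to user space in up to two segments (event->v[0] and v[1]);
 * dequeue_event() blocks until an event arrives or the device goes away.
 */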
311
312
static void queue_event(struct client *client, struct event *event,
313
void *data0, size_t size0, void *data1, size_t size1)
314
{
315
event->v[0].data = data0;
316
event->v[0].size = size0;
317
event->v[1].data = data1;
318
event->v[1].size = size1;
319
320
scoped_guard(spinlock_irqsave, &client->lock) {
321
if (client->in_shutdown)
322
kfree(event);
323
else
324
list_add_tail(&event->link, &client->event_list);
325
}
326
327
wake_up_interruptible(&client->wait);
328
}
329
330
static int dequeue_event(struct client *client,
331
char __user *buffer, size_t count)
332
{
333
struct event *event;
334
size_t size, total;
335
int i, ret;
336
337
ret = wait_event_interruptible(client->wait,
338
!list_empty(&client->event_list) ||
339
fw_device_is_shutdown(client->device));
340
if (ret < 0)
341
return ret;
342
343
if (list_empty(&client->event_list) &&
344
fw_device_is_shutdown(client->device))
345
return -ENODEV;
346
347
scoped_guard(spinlock_irq, &client->lock) {
348
event = list_first_entry(&client->event_list, struct event, link);
349
list_del(&event->link);
350
}
351
352
total = 0;
353
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
354
size = min(event->v[i].size, count - total);
355
if (copy_to_user(buffer + total, event->v[i].data, size)) {
356
ret = -EFAULT;
357
goto out;
358
}
359
total += size;
360
}
361
ret = total;
362
363
out:
364
kfree(event);
365
366
return ret;
367
}
368
369
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
370
size_t count, loff_t *offset)
371
{
372
struct client *client = file->private_data;
373
374
return dequeue_event(client, buffer, count);
375
}
376
377
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
378
struct client *client)
379
{
380
struct fw_card *card = client->device->card;
381
382
guard(spinlock_irq)(&card->lock);
383
384
event->closure = client->bus_reset_closure;
385
event->type = FW_CDEV_EVENT_BUS_RESET;
386
event->generation = client->device->generation;
387
event->node_id = client->device->node_id;
388
event->local_node_id = card->local_node->node_id;
389
event->bm_node_id = card->bm_node_id;
390
event->irm_node_id = card->irm_node->node_id;
391
event->root_node_id = card->root_node->node_id;
392
}
393
394
static void for_each_client(struct fw_device *device,
395
void (*callback)(struct client *client))
396
{
397
struct client *c;
398
399
guard(mutex)(&device->client_list_mutex);
400
401
list_for_each_entry(c, &device->client_list, link)
402
callback(c);
403
}
404
405
static void queue_bus_reset_event(struct client *client)
406
{
407
struct bus_reset_event *e;
408
struct client_resource *resource;
409
unsigned long index;
410
411
e = kzalloc(sizeof(*e), GFP_KERNEL);
412
if (e == NULL)
413
return;
414
415
fill_bus_reset_event(&e->reset, client);
416
417
queue_event(client, &e->event,
418
&e->reset, sizeof(e->reset), NULL, 0);
419
420
guard(spinlock_irq)(&client->lock);
421
422
xa_for_each(&client->resource_xa, index, resource) {
423
if (is_iso_resource(resource))
424
schedule_iso_resource(to_iso_resource(resource), 0);
425
}
426
}
427
428
void fw_device_cdev_update(struct fw_device *device)
429
{
430
for_each_client(device, queue_bus_reset_event);
431
}
432
433
static void wake_up_client(struct client *client)
434
{
435
wake_up_interruptible(&client->wait);
436
}
437
438
void fw_device_cdev_remove(struct fw_device *device)
439
{
440
for_each_client(device, wake_up_client);
441
}
442
443
union ioctl_arg {
444
struct fw_cdev_get_info get_info;
445
struct fw_cdev_send_request send_request;
446
struct fw_cdev_allocate allocate;
447
struct fw_cdev_deallocate deallocate;
448
struct fw_cdev_send_response send_response;
449
struct fw_cdev_initiate_bus_reset initiate_bus_reset;
450
struct fw_cdev_add_descriptor add_descriptor;
451
struct fw_cdev_remove_descriptor remove_descriptor;
452
struct fw_cdev_create_iso_context create_iso_context;
453
struct fw_cdev_queue_iso queue_iso;
454
struct fw_cdev_start_iso start_iso;
455
struct fw_cdev_stop_iso stop_iso;
456
struct fw_cdev_get_cycle_timer get_cycle_timer;
457
struct fw_cdev_allocate_iso_resource allocate_iso_resource;
458
struct fw_cdev_send_stream_packet send_stream_packet;
459
struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
460
struct fw_cdev_send_phy_packet send_phy_packet;
461
struct fw_cdev_receive_phy_packets receive_phy_packets;
462
struct fw_cdev_set_iso_channels set_iso_channels;
463
struct fw_cdev_flush_iso flush_iso;
464
};
465
466
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
467
{
468
struct fw_cdev_get_info *a = &arg->get_info;
469
struct fw_cdev_event_bus_reset bus_reset;
470
unsigned long ret = 0;
471
472
client->version = a->version;
473
a->version = FW_CDEV_KERNEL_VERSION;
474
a->card = client->device->card->index;
475
476
scoped_guard(rwsem_read, &fw_device_rwsem) {
477
if (a->rom != 0) {
478
size_t want = a->rom_length;
479
size_t have = client->device->config_rom_length * 4;
480
481
ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
482
min(want, have));
483
if (ret != 0)
484
return -EFAULT;
485
}
486
a->rom_length = client->device->config_rom_length * 4;
487
}
488
489
guard(mutex)(&client->device->client_list_mutex);
490
491
client->bus_reset_closure = a->bus_reset_closure;
492
if (a->bus_reset != 0) {
493
fill_bus_reset_event(&bus_reset, client);
494
/* unaligned size of bus_reset is 36 bytes */
495
ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
496
}
497
if (ret == 0 && list_empty(&client->link))
498
list_add_tail(&client->link, &client->device->client_list);
499
500
return ret ? -EFAULT : 0;
501
}
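/*
 * add_client_resource() stores the resource in client->resource_xa and
 * hands the xarray index back to user space as the resource handle.  Each
 * stored resource holds a reference on the client.
 */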
502
503
static int add_client_resource(struct client *client, struct client_resource *resource,
504
gfp_t gfp_mask)
505
{
506
int ret;
507
508
scoped_guard(spinlock_irqsave, &client->lock) {
509
u32 index;
510
511
if (client->in_shutdown) {
512
ret = -ECANCELED;
513
} else {
514
if (gfpflags_allow_blocking(gfp_mask)) {
515
ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
516
GFP_NOWAIT);
517
} else {
518
ret = xa_alloc_bh(&client->resource_xa, &index, resource,
519
xa_limit_32b, GFP_NOWAIT);
520
}
521
}
522
if (ret >= 0) {
523
resource->handle = index;
524
client_get(client);
525
if (is_iso_resource(resource))
526
schedule_iso_resource(to_iso_resource(resource), 0);
527
}
528
}
529
530
return ret < 0 ? ret : 0;
531
}
532
533
static int release_client_resource(struct client *client, u32 handle,
534
client_resource_release_fn_t release,
535
struct client_resource **return_resource)
536
{
537
unsigned long index = handle;
538
struct client_resource *resource;
539
540
scoped_guard(spinlock_irq, &client->lock) {
541
if (client->in_shutdown)
542
return -EINVAL;
543
544
resource = xa_load(&client->resource_xa, index);
545
if (!resource || resource->release != release)
546
return -EINVAL;
547
548
xa_erase(&client->resource_xa, handle);
549
}
550
551
if (return_resource)
552
*return_resource = resource;
553
else
554
resource->release(client, resource);
555
556
client_put(client);
557
558
return 0;
559
}
560
561
static void release_transaction(struct client *client,
562
struct client_resource *resource)
563
{
564
}
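/*
 * complete_transaction() is called by the transaction layer once the
 * response (or an error rcode) arrives.  It drops the resource from the
 * xarray, wakes tx_flush_wait during shutdown, and queues either an
 * fw_cdev_event_response or, for ABI version 6 clients, an
 * fw_cdev_event_response2 carrying the request/response timestamps.
 */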
565
566
static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
567
u32 response_tstamp, void *payload, size_t length, void *data)
568
{
569
struct outbound_transaction_event *e = data;
570
struct client *client = e->client;
571
unsigned long index = e->r.resource.handle;
572
573
scoped_guard(spinlock_irqsave, &client->lock) {
574
xa_erase(&client->resource_xa, index);
575
if (client->in_shutdown)
576
wake_up(&client->tx_flush_wait);
577
}
578
579
switch (e->rsp.without_tstamp.type) {
580
case FW_CDEV_EVENT_RESPONSE:
581
{
582
struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
583
584
if (length < rsp->length)
585
rsp->length = length;
586
if (rcode == RCODE_COMPLETE)
587
memcpy(rsp->data, payload, rsp->length);
588
589
rsp->rcode = rcode;
590
591
// In the case that sizeof(*rsp) doesn't align with the position of the
592
// data, and the read is short, preserve an extra copy of the data
593
// to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
594
// for short reads and some apps depended on it, this is both safe
595
// and prudent for compatibility.
596
if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
597
queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
598
else
599
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
600
601
break;
602
}
603
case FW_CDEV_EVENT_RESPONSE2:
604
{
605
struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
606
607
if (length < rsp->length)
608
rsp->length = length;
609
if (rcode == RCODE_COMPLETE)
610
memcpy(rsp->data, payload, rsp->length);
611
612
rsp->rcode = rcode;
613
rsp->request_tstamp = request_tstamp;
614
rsp->response_tstamp = response_tstamp;
615
616
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
617
618
break;
619
}
620
default:
621
WARN_ON(1);
622
break;
623
}
624
625
// Drop the xarray's reference.
626
client_put(client);
627
}
628
629
static int init_request(struct client *client,
630
struct fw_cdev_send_request *request,
631
int destination_id, int speed)
632
{
633
struct outbound_transaction_event *e;
634
void *payload;
635
int ret;
636
637
if (request->tcode != TCODE_STREAM_DATA &&
638
(request->length > 4096 || request->length > 512 << speed))
639
return -EIO;
640
641
if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
642
request->length < 4)
643
return -EINVAL;
644
645
e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
646
if (e == NULL)
647
return -ENOMEM;
648
e->client = client;
649
650
if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
651
struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
652
653
rsp->type = FW_CDEV_EVENT_RESPONSE;
654
rsp->length = request->length;
655
rsp->closure = request->closure;
656
payload = rsp->data;
657
} else {
658
struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
659
660
rsp->type = FW_CDEV_EVENT_RESPONSE2;
661
rsp->length = request->length;
662
rsp->closure = request->closure;
663
payload = rsp->data;
664
}
665
666
if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
667
ret = -EFAULT;
668
goto failed;
669
}
670
671
e->r.resource.release = release_transaction;
672
ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
673
if (ret < 0)
674
goto failed;
675
676
fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
677
destination_id, request->generation, speed, request->offset,
678
payload, request->length, complete_transaction, e);
679
return 0;
680
681
failed:
682
kfree(e);
683
684
return ret;
685
}
686
687
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
688
{
689
switch (arg->send_request.tcode) {
690
case TCODE_WRITE_QUADLET_REQUEST:
691
case TCODE_WRITE_BLOCK_REQUEST:
692
case TCODE_READ_QUADLET_REQUEST:
693
case TCODE_READ_BLOCK_REQUEST:
694
case TCODE_LOCK_MASK_SWAP:
695
case TCODE_LOCK_COMPARE_SWAP:
696
case TCODE_LOCK_FETCH_ADD:
697
case TCODE_LOCK_LITTLE_ADD:
698
case TCODE_LOCK_BOUNDED_ADD:
699
case TCODE_LOCK_WRAP_ADD:
700
case TCODE_LOCK_VENDOR_DEPENDENT:
701
break;
702
default:
703
return -EINVAL;
704
}
705
706
return init_request(client, &arg->send_request, client->device->node_id,
707
client->device->max_speed);
708
}
709
710
static void release_request(struct client *client,
711
struct client_resource *resource)
712
{
713
struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource);
714
715
if (r->is_fcp)
716
fw_request_put(r->request);
717
else
718
fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
719
720
fw_card_put(r->card);
721
kfree(r);
722
}
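/*
 * handle_request() may run in atomic context (note the GFP_ATOMIC
 * allocations).  It wraps an incoming request in an
 * inbound_transaction_resource and queues a request event for the client;
 * FCP requests are retained with fw_request_get() so that their payload
 * stays valid until user space responds or the handle is released.
 */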
723
724
static void handle_request(struct fw_card *card, struct fw_request *request,
725
int tcode, int destination, int source,
726
int generation, unsigned long long offset,
727
void *payload, size_t length, void *callback_data)
728
{
729
struct address_handler_resource *handler = callback_data;
730
bool is_fcp = is_in_fcp_region(offset, length);
731
struct inbound_transaction_resource *r;
732
struct inbound_transaction_event *e;
733
size_t event_size0;
734
int ret;
735
736
/* card may be different from handler->client->device->card */
737
fw_card_get(card);
738
739
// Extend the lifetime of data for request so that its payload is safely accessible in
740
// the process context for the client.
741
if (is_fcp)
742
fw_request_get(request);
743
744
r = kmalloc(sizeof(*r), GFP_ATOMIC);
745
e = kmalloc(sizeof(*e), GFP_ATOMIC);
746
if (r == NULL || e == NULL)
747
goto failed;
748
749
r->card = card;
750
r->request = request;
751
r->is_fcp = is_fcp;
752
r->data = payload;
753
r->length = length;
754
755
r->resource.release = release_request;
756
ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
757
if (ret < 0)
758
goto failed;
759
760
if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
761
struct fw_cdev_event_request *req = &e->req.request;
762
763
if (tcode & 0x10)
764
tcode = TCODE_LOCK_REQUEST;
765
766
req->type = FW_CDEV_EVENT_REQUEST;
767
req->tcode = tcode;
768
req->offset = offset;
769
req->length = length;
770
req->handle = r->resource.handle;
771
req->closure = handler->closure;
772
event_size0 = sizeof(*req);
773
} else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
774
struct fw_cdev_event_request2 *req = &e->req.request2;
775
776
req->type = FW_CDEV_EVENT_REQUEST2;
777
req->tcode = tcode;
778
req->offset = offset;
779
req->source_node_id = source;
780
req->destination_node_id = destination;
781
req->card = card->index;
782
req->generation = generation;
783
req->length = length;
784
req->handle = r->resource.handle;
785
req->closure = handler->closure;
786
event_size0 = sizeof(*req);
787
} else {
788
struct fw_cdev_event_request3 *req = &e->req.with_tstamp;
789
790
req->type = FW_CDEV_EVENT_REQUEST3;
791
req->tcode = tcode;
792
req->offset = offset;
793
req->source_node_id = source;
794
req->destination_node_id = destination;
795
req->card = card->index;
796
req->generation = generation;
797
req->length = length;
798
req->handle = r->resource.handle;
799
req->closure = handler->closure;
800
req->tstamp = fw_request_get_timestamp(request);
801
event_size0 = sizeof(*req);
802
}
803
804
queue_event(handler->client, &e->event,
805
&e->req, event_size0, r->data, length);
806
return;
807
808
failed:
809
kfree(r);
810
kfree(e);
811
812
if (!is_fcp)
813
fw_send_response(card, request, RCODE_CONFLICT_ERROR);
814
else
815
fw_request_put(request);
816
817
fw_card_put(card);
818
}
819
820
static void release_address_handler(struct client *client,
821
struct client_resource *resource)
822
{
823
struct address_handler_resource *r = to_address_handler_resource(resource);
824
825
fw_core_remove_address_handler(&r->handler);
826
kfree(r);
827
}
828
829
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
830
{
831
struct fw_cdev_allocate *a = &arg->allocate;
832
struct address_handler_resource *r;
833
struct fw_address_region region;
834
int ret;
835
836
r = kmalloc(sizeof(*r), GFP_KERNEL);
837
if (r == NULL)
838
return -ENOMEM;
839
840
region.start = a->offset;
841
if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
842
region.end = a->offset + a->length;
843
else
844
region.end = a->region_end;
845
846
r->handler.length = a->length;
847
r->handler.address_callback = handle_request;
848
r->handler.callback_data = r;
849
r->closure = a->closure;
850
r->client = client;
851
852
ret = fw_core_add_address_handler(&r->handler, &region);
853
if (ret < 0) {
854
kfree(r);
855
return ret;
856
}
857
a->offset = r->handler.offset;
858
859
r->resource.release = release_address_handler;
860
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
861
if (ret < 0) {
862
release_address_handler(client, &r->resource);
863
return ret;
864
}
865
a->handle = r->resource.handle;
866
867
return 0;
868
}
869
870
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
871
{
872
return release_client_resource(client, arg->deallocate.handle,
873
release_address_handler, NULL);
874
}
875
876
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
877
{
878
struct fw_cdev_send_response *a = &arg->send_response;
879
struct client_resource *resource;
880
struct inbound_transaction_resource *r;
881
int ret = 0;
882
883
if (release_client_resource(client, a->handle,
884
release_request, &resource) < 0)
885
return -EINVAL;
886
887
r = to_inbound_transaction_resource(resource);
888
if (r->is_fcp) {
889
fw_request_put(r->request);
890
goto out;
891
}
892
893
if (a->length != fw_get_response_length(r->request)) {
894
ret = -EINVAL;
895
fw_request_put(r->request);
896
goto out;
897
}
898
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
899
ret = -EFAULT;
900
fw_request_put(r->request);
901
goto out;
902
}
903
fw_send_response(r->card, r->request, a->rcode);
904
out:
905
fw_card_put(r->card);
906
kfree(r);
907
908
return ret;
909
}
910
911
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
912
{
913
fw_schedule_bus_reset(client->device->card, true,
914
arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
915
return 0;
916
}
917
918
static void release_descriptor(struct client *client,
919
struct client_resource *resource)
920
{
921
struct descriptor_resource *r = to_descriptor_resource(resource);
922
923
fw_core_remove_descriptor(&r->descriptor);
924
kfree(r);
925
}
926
927
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
928
{
929
struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
930
struct descriptor_resource *r;
931
int ret;
932
933
/* Access policy: Allow this ioctl only on local nodes' device files. */
934
if (!client->device->is_local)
935
return -ENOSYS;
936
937
if (a->length > 256)
938
return -EINVAL;
939
940
r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
941
if (r == NULL)
942
return -ENOMEM;
943
944
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
945
ret = -EFAULT;
946
goto failed;
947
}
948
949
r->descriptor.length = a->length;
950
r->descriptor.immediate = a->immediate;
951
r->descriptor.key = a->key;
952
r->descriptor.data = r->data;
953
954
ret = fw_core_add_descriptor(&r->descriptor);
955
if (ret < 0)
956
goto failed;
957
958
r->resource.release = release_descriptor;
959
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
960
if (ret < 0) {
961
fw_core_remove_descriptor(&r->descriptor);
962
goto failed;
963
}
964
a->handle = r->resource.handle;
965
966
return 0;
967
failed:
968
kfree(r);
969
970
return ret;
971
}
972
973
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
974
{
975
return release_client_resource(client, arg->remove_descriptor.handle,
976
release_descriptor, NULL);
977
}
978
979
static void iso_callback(struct fw_iso_context *context, u32 cycle,
980
size_t header_length, void *header, void *data)
981
{
982
struct client *client = data;
983
struct iso_interrupt_event *e;
984
985
e = kmalloc(sizeof(*e) + header_length, GFP_KERNEL);
986
if (e == NULL)
987
return;
988
989
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
990
e->interrupt.closure = client->iso_closure;
991
e->interrupt.cycle = cycle;
992
e->interrupt.header_length = header_length;
993
memcpy(e->interrupt.header, header, header_length);
994
queue_event(client, &e->event, &e->interrupt,
995
sizeof(e->interrupt) + header_length, NULL, 0);
996
}
997
998
static void iso_mc_callback(struct fw_iso_context *context,
999
dma_addr_t completed, void *data)
1000
{
1001
struct client *client = data;
1002
struct iso_interrupt_mc_event *e;
1003
1004
e = kmalloc(sizeof(*e), GFP_KERNEL);
1005
if (e == NULL)
1006
return;
1007
1008
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
1009
e->interrupt.closure = client->iso_closure;
1010
e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
1011
completed);
1012
queue_event(client, &e->event, &e->interrupt,
1013
sizeof(e->interrupt), NULL, 0);
1014
}
1015
1016
static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
1017
{
1018
if (context->type == FW_ISO_CONTEXT_TRANSMIT)
1019
return DMA_TO_DEVICE;
1020
else
1021
return DMA_FROM_DEVICE;
1022
}
1023
1024
static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
1025
fw_iso_mc_callback_t callback,
1026
void *callback_data)
1027
{
1028
struct fw_iso_context *ctx;
1029
1030
ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
1031
0, 0, 0, NULL, callback_data);
1032
if (!IS_ERR(ctx))
1033
ctx->callback.mc = callback;
1034
1035
return ctx;
1036
}
1037
1038
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
1039
{
1040
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
1041
struct fw_iso_context *context;
1042
union fw_iso_callback cb;
1043
int ret;
1044
1045
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
1046
FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
1047
FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
1048
FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
1049
1050
switch (a->type) {
1051
case FW_ISO_CONTEXT_TRANSMIT:
1052
if (a->speed > SCODE_3200 || a->channel > 63)
1053
return -EINVAL;
1054
1055
cb.sc = iso_callback;
1056
break;
1057
1058
case FW_ISO_CONTEXT_RECEIVE:
1059
if (a->header_size < 4 || (a->header_size & 3) ||
1060
a->channel > 63)
1061
return -EINVAL;
1062
1063
cb.sc = iso_callback;
1064
break;
1065
1066
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1067
cb.mc = iso_mc_callback;
1068
break;
1069
1070
default:
1071
return -EINVAL;
1072
}
1073
1074
if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
1075
context = fw_iso_mc_context_create(client->device->card, cb.mc,
1076
client);
1077
else
1078
context = fw_iso_context_create(client->device->card, a->type,
1079
a->channel, a->speed,
1080
a->header_size, cb.sc, client);
1081
if (IS_ERR(context))
1082
return PTR_ERR(context);
1083
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
1084
context->drop_overflow_headers = true;
1085
1086
// We only support one context at this time.
1087
guard(spinlock_irq)(&client->lock);
1088
1089
if (client->iso_context != NULL) {
1090
fw_iso_context_destroy(context);
1091
1092
return -EBUSY;
1093
}
1094
if (!client->buffer_is_mapped) {
1095
ret = fw_iso_buffer_map_dma(&client->buffer,
1096
client->device->card,
1097
iso_dma_direction(context));
1098
if (ret < 0) {
1099
fw_iso_context_destroy(context);
1100
1101
return ret;
1102
}
1103
client->buffer_is_mapped = true;
1104
}
1105
client->iso_closure = a->closure;
1106
client->iso_context = context;
1107
1108
a->handle = 0;
1109
1110
return 0;
1111
}
1112
1113
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1114
{
1115
struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1116
struct fw_iso_context *ctx = client->iso_context;
1117
1118
if (ctx == NULL || a->handle != 0)
1119
return -EINVAL;
1120
1121
return fw_iso_context_set_channels(ctx, &a->channels);
1122
}
1123
1124
/* Macros for decoding the iso packet control header. */
1125
#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
1126
#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
1127
#define GET_SKIP(v) (((v) >> 17) & 0x01)
1128
#define GET_TAG(v) (((v) >> 18) & 0x03)
1129
#define GET_SY(v) (((v) >> 20) & 0x0f)
1130
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
1131
1132
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1133
{
1134
struct fw_cdev_queue_iso *a = &arg->queue_iso;
1135
struct fw_cdev_iso_packet __user *p, *end, *next;
1136
struct fw_iso_context *ctx = client->iso_context;
1137
unsigned long payload, buffer_end, transmit_header_bytes = 0;
1138
u32 control;
1139
int count;
1140
DEFINE_RAW_FLEX(struct fw_iso_packet, u, header, 64);
1141
1142
if (ctx == NULL || a->handle != 0)
1143
return -EINVAL;
1144
1145
/*
1146
* If the user passes a non-NULL data pointer, has mmap()'ed
1147
* the iso buffer, and the pointer points inside the buffer,
1148
* we setup the payload pointers accordingly. Otherwise we
1149
* set them both to 0, which will still let packets with
1150
* payload_length == 0 through. In other words, if no packets
1151
* use the indirect payload, the iso buffer need not be mapped
1152
* and the a->data pointer is ignored.
1153
*/
1154
payload = (unsigned long)a->data - client->vm_start;
1155
buffer_end = client->buffer.page_count << PAGE_SHIFT;
1156
if (a->data == 0 || client->buffer.pages == NULL ||
1157
payload >= buffer_end) {
1158
payload = 0;
1159
buffer_end = 0;
1160
}
1161
1162
if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1163
return -EINVAL;
1164
1165
p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1166
1167
end = (void __user *)p + a->size;
1168
count = 0;
1169
while (p < end) {
1170
if (get_user(control, &p->control))
1171
return -EFAULT;
1172
u->payload_length = GET_PAYLOAD_LENGTH(control);
1173
u->interrupt = GET_INTERRUPT(control);
1174
u->skip = GET_SKIP(control);
1175
u->tag = GET_TAG(control);
1176
u->sy = GET_SY(control);
1177
u->header_length = GET_HEADER_LENGTH(control);
1178
1179
switch (ctx->type) {
1180
case FW_ISO_CONTEXT_TRANSMIT:
1181
if (u->header_length & 3)
1182
return -EINVAL;
1183
transmit_header_bytes = u->header_length;
1184
break;
1185
1186
case FW_ISO_CONTEXT_RECEIVE:
1187
if (u->header_length == 0 ||
1188
u->header_length % ctx->header_size != 0)
1189
return -EINVAL;
1190
break;
1191
1192
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1193
if (u->payload_length == 0 ||
1194
u->payload_length & 3)
1195
return -EINVAL;
1196
break;
1197
}
1198
1199
next = (struct fw_cdev_iso_packet __user *)
1200
&p->header[transmit_header_bytes / 4];
1201
if (next > end)
1202
return -EINVAL;
1203
if (copy_from_user
1204
(u->header, p->header, transmit_header_bytes))
1205
return -EFAULT;
1206
if (u->skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1207
u->header_length + u->payload_length > 0)
1208
return -EINVAL;
1209
if (payload + u->payload_length > buffer_end)
1210
return -EINVAL;
1211
1212
if (fw_iso_context_queue(ctx, u, &client->buffer, payload))
1213
break;
1214
1215
p = next;
1216
payload += u->payload_length;
1217
count++;
1218
}
1219
fw_iso_context_queue_flush(ctx);
1220
1221
a->size -= uptr_to_u64(p) - a->packets;
1222
a->packets = uptr_to_u64(p);
1223
a->data = client->vm_start + payload;
1224
1225
return count;
1226
}
1227
1228
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1229
{
1230
struct fw_cdev_start_iso *a = &arg->start_iso;
1231
1232
BUILD_BUG_ON(
1233
FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1234
FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1235
FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1236
FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1237
FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1238
1239
if (client->iso_context == NULL || a->handle != 0)
1240
return -EINVAL;
1241
1242
if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1243
(a->tags == 0 || a->tags > 15 || a->sync > 15))
1244
return -EINVAL;
1245
1246
return fw_iso_context_start(client->iso_context,
1247
a->cycle, a->sync, a->tags);
1248
}
1249
1250
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1251
{
1252
struct fw_cdev_stop_iso *a = &arg->stop_iso;
1253
1254
if (client->iso_context == NULL || a->handle != 0)
1255
return -EINVAL;
1256
1257
return fw_iso_context_stop(client->iso_context);
1258
}
1259
1260
static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
1261
{
1262
struct fw_cdev_flush_iso *a = &arg->flush_iso;
1263
1264
if (client->iso_context == NULL || a->handle != 0)
1265
return -EINVAL;
1266
1267
return fw_iso_context_flush_completions(client->iso_context);
1268
}
1269
1270
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1271
{
1272
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1273
struct fw_card *card = client->device->card;
1274
struct timespec64 ts = {0, 0};
1275
u32 cycle_time = 0;
1276
int ret;
1277
1278
guard(irq)();
1279
1280
ret = fw_card_read_cycle_time(card, &cycle_time);
1281
if (ret < 0)
1282
return ret;
1283
1284
switch (a->clk_id) {
1285
case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
1286
case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
1287
case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
1288
default:
1289
return -EINVAL;
1290
}
1291
1292
a->tv_sec = ts.tv_sec;
1293
a->tv_nsec = ts.tv_nsec;
1294
a->cycle_timer = cycle_time;
1295
1296
return 0;
1297
}
1298
1299
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1300
{
1301
struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1302
struct fw_cdev_get_cycle_timer2 ct2;
1303
1304
ct2.clk_id = CLOCK_REALTIME;
1305
ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1306
1307
a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1308
a->cycle_timer = ct2.cycle_timer;
1309
1310
return 0;
1311
}
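/*
 * iso_resource_work() performs the deferred channel/bandwidth
 * (de)allocation via fw_iso_resource_manage().  An ISO_RES_ALLOC request
 * transitions to ISO_RES_REALLOC so the resource is acquired again after
 * each bus reset (see queue_bus_reset_event()); if reallocation fails, the
 * resource is dropped from the xarray and freed.
 */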
1312
1313
static void iso_resource_work(struct work_struct *work)
1314
{
1315
struct iso_resource_event *e;
1316
struct iso_resource *r = from_work(r, work, work.work);
1317
struct client *client = r->client;
1318
unsigned long index = r->resource.handle;
1319
int generation, channel, bandwidth, todo;
1320
bool skip, free, success;
1321
1322
scoped_guard(spinlock_irq, &client->lock) {
1323
generation = client->device->generation;
1324
todo = r->todo;
1325
// Allow 1000ms grace period for other reallocations.
1326
if (todo == ISO_RES_ALLOC &&
1327
time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
1328
schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1329
skip = true;
1330
} else {
1331
// We could be called twice within the same generation.
1332
skip = todo == ISO_RES_REALLOC &&
1333
r->generation == generation;
1334
}
1335
free = todo == ISO_RES_DEALLOC ||
1336
todo == ISO_RES_ALLOC_ONCE ||
1337
todo == ISO_RES_DEALLOC_ONCE;
1338
r->generation = generation;
1339
}
1340
1341
if (skip)
1342
goto out;
1343
1344
bandwidth = r->bandwidth;
1345
1346
fw_iso_resource_manage(client->device->card, generation,
1347
r->channels, &channel, &bandwidth,
1348
todo == ISO_RES_ALLOC ||
1349
todo == ISO_RES_REALLOC ||
1350
todo == ISO_RES_ALLOC_ONCE);
1351
/*
1352
* Is this generation outdated already? As long as this resource sticks
1353
* in the xarray, it will be scheduled again for a newer generation or at
1354
* shutdown.
1355
*/
1356
if (channel == -EAGAIN &&
1357
(todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1358
goto out;
1359
1360
success = channel >= 0 || bandwidth > 0;
1361
1362
scoped_guard(spinlock_irq, &client->lock) {
1363
// Transit from allocation to reallocation, except if the client
1364
// requested deallocation in the meantime.
1365
if (r->todo == ISO_RES_ALLOC)
1366
r->todo = ISO_RES_REALLOC;
1367
// Allocation or reallocation failure? Pull this resource out of the
1368
// xarray and prepare for deletion, unless the client is shutting down.
1369
if (r->todo == ISO_RES_REALLOC && !success &&
1370
!client->in_shutdown &&
1371
xa_erase(&client->resource_xa, index)) {
1372
client_put(client);
1373
free = true;
1374
}
1375
}
1376
1377
if (todo == ISO_RES_ALLOC && channel >= 0)
1378
r->channels = 1ULL << channel;
1379
1380
if (todo == ISO_RES_REALLOC && success)
1381
goto out;
1382
1383
if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1384
e = r->e_alloc;
1385
r->e_alloc = NULL;
1386
} else {
1387
e = r->e_dealloc;
1388
r->e_dealloc = NULL;
1389
}
1390
e->iso_resource.handle = r->resource.handle;
1391
e->iso_resource.channel = channel;
1392
e->iso_resource.bandwidth = bandwidth;
1393
1394
queue_event(client, &e->event,
1395
&e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1396
1397
if (free) {
1398
cancel_delayed_work(&r->work);
1399
kfree(r->e_alloc);
1400
kfree(r->e_dealloc);
1401
kfree(r);
1402
}
1403
out:
1404
client_put(client);
1405
}
1406
1407
static void release_iso_resource(struct client *client,
1408
struct client_resource *resource)
1409
{
1410
struct iso_resource *r = to_iso_resource(resource);
1411
1412
guard(spinlock_irq)(&client->lock);
1413
1414
r->todo = ISO_RES_DEALLOC;
1415
schedule_iso_resource(r, 0);
1416
}
1417
1418
static int init_iso_resource(struct client *client,
1419
struct fw_cdev_allocate_iso_resource *request, int todo)
1420
{
1421
struct iso_resource_event *e1, *e2;
1422
struct iso_resource *r;
1423
int ret;
1424
1425
if ((request->channels == 0 && request->bandwidth == 0) ||
1426
request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
1427
return -EINVAL;
1428
1429
r = kmalloc(sizeof(*r), GFP_KERNEL);
1430
e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1431
e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1432
if (r == NULL || e1 == NULL || e2 == NULL) {
1433
ret = -ENOMEM;
1434
goto fail;
1435
}
1436
1437
INIT_DELAYED_WORK(&r->work, iso_resource_work);
1438
r->client = client;
1439
r->todo = todo;
1440
r->generation = -1;
1441
r->channels = request->channels;
1442
r->bandwidth = request->bandwidth;
1443
r->e_alloc = e1;
1444
r->e_dealloc = e2;
1445
1446
e1->iso_resource.closure = request->closure;
1447
e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1448
e2->iso_resource.closure = request->closure;
1449
e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1450
1451
if (todo == ISO_RES_ALLOC) {
1452
r->resource.release = release_iso_resource;
1453
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1454
if (ret < 0)
1455
goto fail;
1456
} else {
1457
r->resource.release = NULL;
1458
r->resource.handle = -1;
1459
schedule_iso_resource(r, 0);
1460
}
1461
request->handle = r->resource.handle;
1462
1463
return 0;
1464
fail:
1465
kfree(r);
1466
kfree(e1);
1467
kfree(e2);
1468
1469
return ret;
1470
}
1471
1472
static int ioctl_allocate_iso_resource(struct client *client,
1473
union ioctl_arg *arg)
1474
{
1475
return init_iso_resource(client,
1476
&arg->allocate_iso_resource, ISO_RES_ALLOC);
1477
}
1478
1479
static int ioctl_deallocate_iso_resource(struct client *client,
1480
union ioctl_arg *arg)
1481
{
1482
return release_client_resource(client,
1483
arg->deallocate.handle, release_iso_resource, NULL);
1484
}
1485
1486
static int ioctl_allocate_iso_resource_once(struct client *client,
1487
union ioctl_arg *arg)
1488
{
1489
return init_iso_resource(client,
1490
&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1491
}
1492
1493
static int ioctl_deallocate_iso_resource_once(struct client *client,
1494
union ioctl_arg *arg)
1495
{
1496
return init_iso_resource(client,
1497
&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1498
}
1499
1500
/*
1501
* Returns a speed code: Maximum speed to or from this device,
1502
* limited by the device's link speed, the local node's link speed,
1503
* and all PHY port speeds between the two links.
1504
*/
1505
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1506
{
1507
return client->device->max_speed;
1508
}
1509
1510
static int ioctl_send_broadcast_request(struct client *client,
1511
union ioctl_arg *arg)
1512
{
1513
struct fw_cdev_send_request *a = &arg->send_request;
1514
1515
switch (a->tcode) {
1516
case TCODE_WRITE_QUADLET_REQUEST:
1517
case TCODE_WRITE_BLOCK_REQUEST:
1518
break;
1519
default:
1520
return -EINVAL;
1521
}
1522
1523
/* Security policy: Only allow accesses to Units Space. */
1524
if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1525
return -EACCES;
1526
1527
return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1528
}
1529
1530
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1531
{
1532
struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1533
struct fw_cdev_send_request request;
1534
int dest;
1535
1536
if (a->speed > client->device->card->link_speed ||
1537
a->length > 1024 << a->speed)
1538
return -EIO;
1539
1540
if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1541
return -EINVAL;
1542
1543
dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1544
request.tcode = TCODE_STREAM_DATA;
1545
request.length = a->length;
1546
request.closure = a->closure;
1547
request.data = a->data;
1548
request.generation = a->generation;
1549
1550
return init_request(client, &request, dest, a->speed);
1551
}
1552
1553
static void outbound_phy_packet_callback(struct fw_packet *packet,
1554
struct fw_card *card, int status)
1555
{
1556
struct outbound_phy_packet_event *e =
1557
container_of(packet, struct outbound_phy_packet_event, p);
1558
struct client *e_client = e->client;
1559
u32 rcode;
1560
1561
trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
1562
packet->timestamp);
1563
1564
switch (status) {
1565
// expected:
1566
case ACK_COMPLETE:
1567
rcode = RCODE_COMPLETE;
1568
break;
1569
// should never happen with PHY packets:
1570
case ACK_PENDING:
1571
rcode = RCODE_COMPLETE;
1572
break;
1573
case ACK_BUSY_X:
1574
case ACK_BUSY_A:
1575
case ACK_BUSY_B:
1576
rcode = RCODE_BUSY;
1577
break;
1578
case ACK_DATA_ERROR:
1579
rcode = RCODE_DATA_ERROR;
1580
break;
1581
case ACK_TYPE_ERROR:
1582
rcode = RCODE_TYPE_ERROR;
1583
break;
1584
// stale generation; cancelled; on certain controllers: no ack
1585
default:
1586
rcode = status;
1587
break;
1588
}
1589
1590
switch (e->phy_packet.without_tstamp.type) {
1591
case FW_CDEV_EVENT_PHY_PACKET_SENT:
1592
{
1593
struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1594
1595
pp->rcode = rcode;
1596
pp->data[0] = packet->timestamp;
1597
queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
1598
NULL, 0);
1599
break;
1600
}
1601
case FW_CDEV_EVENT_PHY_PACKET_SENT2:
1602
{
1603
struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1604
1605
pp->rcode = rcode;
1606
pp->tstamp = packet->timestamp;
1607
queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
1608
NULL, 0);
1609
break;
1610
}
1611
default:
1612
WARN_ON(1);
1613
break;
1614
}
1615
1616
client_put(e_client);
1617
}
1618
1619
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1620
{
1621
struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1622
struct fw_card *card = client->device->card;
1623
struct outbound_phy_packet_event *e;
1624
1625
/* Access policy: Allow this ioctl only on local nodes' device files. */
1626
if (!client->device->is_local)
1627
return -ENOSYS;
1628
1629
e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
1630
if (e == NULL)
1631
return -ENOMEM;
1632
1633
client_get(client);
1634
e->client = client;
1635
e->p.speed = SCODE_100;
1636
e->p.generation = a->generation;
1637
async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
1638
e->p.header[1] = a->data[0];
1639
e->p.header[2] = a->data[1];
1640
e->p.header_length = 12;
1641
e->p.callback = outbound_phy_packet_callback;
1642
1643
if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
1644
struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1645
1646
pp->closure = a->closure;
1647
pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
1648
if (is_ping_packet(a->data))
1649
pp->length = 4;
1650
} else {
1651
struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1652
1653
pp->closure = a->closure;
1654
pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
1655
// Keep the data field so that application can match the response event to the
1656
// request.
1657
pp->length = sizeof(a->data);
1658
memcpy(pp->data, a->data, sizeof(a->data));
1659
}
1660
1661
trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
1662
e->p.header[1], e->p.header[2]);
1663
1664
card->driver->send_request(card, &e->p);
1665
1666
return 0;
1667
}
1668
1669
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1670
{
1671
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1672
struct fw_card *card = client->device->card;
1673
1674
/* Access policy: Allow this ioctl only on local nodes' device files. */
1675
if (!client->device->is_local)
1676
return -ENOSYS;
1677
1678
guard(spinlock_irq)(&card->lock);
1679
1680
list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1681
client->phy_receiver_closure = a->closure;
1682
1683
return 0;
1684
}
1685
1686
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1687
{
1688
struct client *client;
1689
1690
guard(spinlock_irqsave)(&card->lock);
1691
1692
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1693
struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1694
if (e == NULL)
1695
break;
1696
1697
if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
1698
struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1699
1700
pp->closure = client->phy_receiver_closure;
1701
pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1702
pp->rcode = RCODE_COMPLETE;
1703
pp->length = 8;
1704
pp->data[0] = p->header[1];
1705
pp->data[1] = p->header[2];
1706
queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
1707
} else {
1708
struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1709
1710
pp = &e->phy_packet.with_tstamp;
1711
pp->closure = client->phy_receiver_closure;
1712
pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
1713
pp->rcode = RCODE_COMPLETE;
1714
pp->length = 8;
1715
pp->tstamp = p->timestamp;
1716
pp->data[0] = p->header[1];
1717
pp->data[1] = p->header[2];
1718
queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
1719
}
1720
}
1721
}
1722
1723
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1724
[0x00] = ioctl_get_info,
1725
[0x01] = ioctl_send_request,
1726
[0x02] = ioctl_allocate,
1727
[0x03] = ioctl_deallocate,
1728
[0x04] = ioctl_send_response,
1729
[0x05] = ioctl_initiate_bus_reset,
1730
[0x06] = ioctl_add_descriptor,
1731
[0x07] = ioctl_remove_descriptor,
1732
[0x08] = ioctl_create_iso_context,
1733
[0x09] = ioctl_queue_iso,
1734
[0x0a] = ioctl_start_iso,
1735
[0x0b] = ioctl_stop_iso,
1736
[0x0c] = ioctl_get_cycle_timer,
1737
[0x0d] = ioctl_allocate_iso_resource,
1738
[0x0e] = ioctl_deallocate_iso_resource,
1739
[0x0f] = ioctl_allocate_iso_resource_once,
1740
[0x10] = ioctl_deallocate_iso_resource_once,
1741
[0x11] = ioctl_get_speed,
1742
[0x12] = ioctl_send_broadcast_request,
1743
[0x13] = ioctl_send_stream_packet,
1744
[0x14] = ioctl_get_cycle_timer2,
1745
[0x15] = ioctl_send_phy_packet,
1746
[0x16] = ioctl_receive_phy_packets,
1747
[0x17] = ioctl_set_iso_channels,
1748
[0x18] = ioctl_flush_iso,
1749
};
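/*
 * dispatch_ioctl(): every command uses ioctl type '#'.  _IOC_NR() indexes
 * ioctl_handlers[] above, and _IOC_DIR()/_IOC_SIZE() decide how much of
 * the argument buffer is copied from and back to user space.
 */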
1750
1751
static int dispatch_ioctl(struct client *client,
1752
unsigned int cmd, void __user *arg)
1753
{
1754
union ioctl_arg buffer;
1755
int ret;
1756
1757
if (fw_device_is_shutdown(client->device))
1758
return -ENODEV;
1759
1760
if (_IOC_TYPE(cmd) != '#' ||
1761
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1762
_IOC_SIZE(cmd) > sizeof(buffer))
1763
return -ENOTTY;
1764
1765
memset(&buffer, 0, sizeof(buffer));
1766
1767
if (_IOC_DIR(cmd) & _IOC_WRITE)
1768
if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1769
return -EFAULT;
1770
1771
ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1772
if (ret < 0)
1773
return ret;
1774
1775
if (_IOC_DIR(cmd) & _IOC_READ)
1776
if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1777
return -EFAULT;
1778
1779
return ret;
1780
}
1781
1782
static long fw_device_op_ioctl(struct file *file,
1783
unsigned int cmd, unsigned long arg)
1784
{
1785
return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1786
}
1787
1788
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1789
{
1790
struct client *client = file->private_data;
1791
unsigned long size;
1792
int page_count, ret;
1793
1794
if (fw_device_is_shutdown(client->device))
1795
return -ENODEV;
1796
1797
/* FIXME: We could support multiple buffers, but we don't. */
1798
if (client->buffer.pages != NULL)
1799
return -EBUSY;
1800
1801
if (!(vma->vm_flags & VM_SHARED))
1802
return -EINVAL;
1803
1804
if (vma->vm_start & ~PAGE_MASK)
1805
return -EINVAL;
1806
1807
client->vm_start = vma->vm_start;
1808
size = vma->vm_end - vma->vm_start;
1809
page_count = size >> PAGE_SHIFT;
1810
if (size & ~PAGE_MASK)
1811
return -EINVAL;
1812
1813
ret = fw_iso_buffer_alloc(&client->buffer, page_count);
1814
if (ret < 0)
1815
return ret;
1816
1817
scoped_guard(spinlock_irq, &client->lock) {
1818
if (client->iso_context) {
1819
ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
1820
iso_dma_direction(client->iso_context));
1821
if (ret < 0)
1822
goto fail;
1823
client->buffer_is_mapped = true;
1824
}
1825
}
1826
1827
ret = vm_map_pages_zero(vma, client->buffer.pages,
1828
client->buffer.page_count);
1829
if (ret < 0)
1830
goto fail;
1831
1832
return 0;
1833
fail:
1834
fw_iso_buffer_destroy(&client->buffer, client->device->card);
1835
return ret;
1836
}
1837
1838
static bool has_outbound_transactions(struct client *client)
1839
{
1840
struct client_resource *resource;
1841
unsigned long index;
1842
1843
guard(spinlock_irq)(&client->lock);
1844
1845
xa_for_each(&client->resource_xa, index, resource) {
1846
if (is_outbound_transaction_resource(resource))
1847
return true;
1848
}
1849
1850
return false;
1851
}
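/*
 * release() tears the client down in a fixed order: unlink it from the PHY
 * receiver and client lists, destroy the iso context and buffer, mark the
 * client as shutting down, wait for outstanding outbound transactions to
 * complete, then release all remaining resources and queued events.
 */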
1852
1853
static int fw_device_op_release(struct inode *inode, struct file *file)
1854
{
1855
struct client *client = file->private_data;
1856
struct event *event, *next_event;
1857
struct client_resource *resource;
1858
unsigned long index;
1859
1860
scoped_guard(spinlock_irq, &client->device->card->lock)
1861
list_del(&client->phy_receiver_link);
1862
1863
scoped_guard(mutex, &client->device->client_list_mutex)
1864
list_del(&client->link);
1865
1866
if (client->iso_context)
1867
fw_iso_context_destroy(client->iso_context);
1868
1869
if (client->buffer.pages)
1870
fw_iso_buffer_destroy(&client->buffer, client->device->card);
1871
1872
// Freeze client->resource_xa and client->event_list.
1873
scoped_guard(spinlock_irq, &client->lock)
1874
client->in_shutdown = true;
1875
1876
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1877
1878
xa_for_each(&client->resource_xa, index, resource) {
1879
resource->release(client, resource);
1880
client_put(client);
1881
}
1882
xa_destroy(&client->resource_xa);
1883
1884
list_for_each_entry_safe(event, next_event, &client->event_list, link)
1885
kfree(event);
1886
1887
client_put(client);
1888
1889
return 0;
1890
}
1891
1892
static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
1893
{
1894
struct client *client = file->private_data;
1895
__poll_t mask = 0;
1896
1897
poll_wait(file, &client->wait, pt);
1898
1899
if (fw_device_is_shutdown(client->device))
1900
mask |= EPOLLHUP | EPOLLERR;
1901
if (!list_empty(&client->event_list))
1902
mask |= EPOLLIN | EPOLLRDNORM;
1903
1904
return mask;
1905
}
1906
1907
const struct file_operations fw_device_ops = {
1908
.owner = THIS_MODULE,
1909
.open = fw_device_op_open,
1910
.read = fw_device_op_read,
1911
.unlocked_ioctl = fw_device_op_ioctl,
1912
.mmap = fw_device_op_mmap,
1913
.release = fw_device_op_release,
1914
.poll = fw_device_op_poll,
1915
.compat_ioctl = compat_ptr_ioctl,
1916
};
1917
1918