GitHub Repository: torvalds/linux
Path: blob/master/drivers/firewire/core-transaction.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <[email protected]>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>

#define HEADER_DESTINATION_IS_BROADCAST(header) \
	((async_header_get_destination(header) & 0x3f) == 0x3f)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return timer_delete(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
			     u32 response_tstamp)
{
	struct fw_transaction *t = NULL, *iter;

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter == transaction) {
				if (try_cancel_split_timeout(iter)) {
					list_del_init(&iter->link);
					card->tlabel_mask &= ~(1ULL << iter->tlabel);
					t = iter;
				}
				break;
			}
		}
	}

	if (!t)
		return -ENOENT;

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
					t->callback_data);
	}

	return 0;
}

/*
 * Only valid for transactions that are potentially pending (ie have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	u32 tstamp;

	/*
	 * Cancel the packet transmission if it's still queued. That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	if (transaction->packet.ack == 0) {
		// The timestamp is reused since it was just read now.
		tstamp = transaction->packet.timestamp;
	} else {
		u32 curr_cycle_time = 0;

		(void)fw_card_read_cycle_time(card, &curr_cycle_time);
		tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
	}

	return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);

static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;

	scoped_guard(spinlock_irqsave, &card->lock) {
		if (list_empty(&t->link))
			return;
		list_del(&t->link);
		card->tlabel_mask &= ~(1ULL << t->tlabel);
	}

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
					t->split_timeout_cycle, NULL, 0, t->callback_data);
	}
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	guard(spinlock_irqsave)(&card->lock);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
		return;

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);
}

static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
					      packet->speed, status, packet->timestamp);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
		break;
	case ACK_PENDING:
	{
		t->split_timeout_cycle =
			compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
		start_split_transaction_timeout(t, card);
		break;
	}
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY, packet->timestamp);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status, packet->timestamp);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		// The value of destination_id argument should include tag, channel, and sy fields
		// as isochronous packet header has.
		packet->header[0] = destination_id;
		isoc_header_set_data_length(packet->header, length);
		isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	async_header_set_retry(packet->header, RETRY_X);
	async_header_set_tlabel(packet->header, tlabel);
	async_header_set_tcode(packet->header, tcode);
	async_header_set_destination(packet->header, destination_id);
	async_header_set_source(packet->header, source_id);
	async_header_set_offset(packet->header, offset);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		async_header_set_quadlet_data(packet->header, *(u32 *)payload);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}

static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * __fw_send_request() - submit a request packet for transmission to generate callback for response
 *			 subaction with or without time stamp.
 * @card: interface to send the request at
 * @t: transaction instance to which the request belongs
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 * @callback: union of two functions whether to receive time stamp or not for response
 *	      subaction.
 * @with_tstamp: Whether to receive time stamp or not for response subaction.
 * @callback_data: data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context. If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation. Otherwise the request is in danger of being sent to a wrong node.
 *
 * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will contain tag, channel, and sy data instead of a node ID then.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests. Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called. Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases: fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire. On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		       int destination_id, int generation, int speed, unsigned long long offset,
		       void *payload, size_t length, union fw_transaction_callback callback,
		       bool with_tstamp, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		if (!with_tstamp) {
			callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		} else {
			// Timestamping on behalf of hardware.
			u32 curr_cycle_time = 0;
			u32 tstamp;

			(void)fw_card_read_cycle_time(card, &curr_cycle_time);
			tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);

			callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0,
					     callback_data);
		}
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->with_tstamp = with_tstamp;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
					      t->packet.header, payload,
					      tcode_is_read_request(tcode) ? 0 : length / 4);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL_GPL(__fw_send_request);
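
/*
 * Illustrative sketch (not part of the driver): one way a client could use
 * fw_send_request(), the inline wrapper around __fw_send_request() declared in
 * <linux/firewire.h>, to read a Config ROM quadlet asynchronously. The context
 * structure, function names and the source of node_id/generation/max_speed
 * (a hypothetical struct fw_device *device) are assumptions for the example.
 */
#if 0
struct example_read_ctx {
	struct fw_transaction t;	/* must stay allocated until the callback runs */
};

static void example_read_complete(struct fw_card *card, int rcode,
				  void *payload, size_t length, void *callback_data)
{
	struct example_read_ctx *ctx = callback_data;

	if (rcode == RCODE_COMPLETE) {
		/* The quadlet is treated as big-endian bus data in this sketch. */
		const __be32 *quadlet = payload;

		pr_info("config ROM quadlet: %08x\n", be32_to_cpu(*quadlet));
	} else {
		pr_info("read failed: %s\n", fw_rcode_string(rcode));
	}

	kfree(ctx);	/* transaction finished, context may be released */
}

static int example_read_config_rom_quadlet(struct fw_device *device)
{
	struct example_read_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	/* Quadlet reads carry no request payload; the data arrives in the callback. */
	fw_send_request(device->card, &ctx->t, TCODE_READ_QUADLET_REQUEST,
			device->node_id, device->generation, device->max_speed,
			CSR_REGISTER_BASE + CSR_CONFIG_ROM, NULL, 0,
			example_read_complete, ctx);
	return 0;
}
#endif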

struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card: card interface for this request
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 *
 * Returns the RCODE. See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request and/or
 * to the payload of the response. DMA mapping restrictions apply to outbound
 * request payloads of >= 8 bytes but not to inbound response payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	timer_destroy_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
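
/*
 * Illustrative sketch (not part of the driver): a blocking quadlet read via
 * fw_run_transaction(). The struct fw_device used to obtain node_id,
 * generation and max_speed is an assumption for the example; the caller must
 * be able to sleep, since the helper waits for the completion.
 */
#if 0
static int example_read_bus_info(struct fw_device *device, u32 *value)
{
	__be32 quadlet;
	int rcode;

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   device->node_id, device->generation,
				   device->max_speed,
				   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
				   &quadlet, sizeof(quadlet));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	*value = be32_to_cpu(quadlet);
	return 0;
}
#endif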

static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status,
					  packet->timestamp);
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = 0;

	phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
		phy_packet_phy_config_set_root_id(&data, node_id);
		phy_packet_phy_config_set_force_root_node(&data, true);
	}

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	phy_packet_phy_config_set_gap_count(&data, gap_count);
	phy_packet_phy_config_set_gap_count_optimization(&data, true);

	guard(mutex)(&phy_config_mutex);

	async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
					  phy_config_packet.generation, phy_config_packet.header[1],
					  phy_config_packet.header[2]);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);
}

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif  /*  0  */

static void complete_address_handler(struct kref *kref)
{
	struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);

	complete(&handler->done);
}

static void get_address_handler(struct fw_address_handler *handler)
{
	kref_get(&handler->kref);
}

static int put_address_handler(struct fw_address_handler *handler)
{
	return kref_put(&handler->kref, complete_address_handler);
}

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range, the specified callback
 * is invoked. The parameters passed to the callback give the details of the particular request.
 * The callback is invoked in the workqueue context in most cases. However, if the request is
 * initiated by the local node, the callback is invoked in the initiator's context.
 *
 * To be called in process context.
 * Return value: 0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	guard(spinlock)(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			init_completion(&handler->done);
			kref_init(&handler->kref);
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
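
/*
 * Illustrative sketch (not part of the driver): registering a handler in
 * fw_high_memory_region and letting the core pick the start offset. The
 * callback and its behaviour are assumptions for the example; real users keep
 * the handler structure alive until fw_core_remove_address_handler() returns.
 */
#if 0
static void example_region_callback(struct fw_card *card, struct fw_request *request,
				    int tcode, int destination, int source,
				    int generation, unsigned long long offset,
				    void *payload, size_t length, void *callback_data)
{
	/* Accept writes, reject everything else. */
	if (tcode == TCODE_WRITE_QUADLET_REQUEST || tcode == TCODE_WRITE_BLOCK_REQUEST)
		fw_send_response(card, request, RCODE_COMPLETE);
	else
		fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler example_handler = {
	.length           = 0x100,
	.address_callback = example_region_callback,
};

static int example_register(void)
{
	int ret = fw_core_add_address_handler(&example_handler, &fw_high_memory_region);

	if (ret == 0)
		pr_info("handler registered at offset 0x%012llx\n",
			(unsigned long long)example_handler.offset);
	return ret;
}
#endif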

/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	scoped_guard(spinlock, &address_handler_list_lock)
		list_del_rcu(&handler->link);

	synchronize_rcu();

	if (!put_address_handler(handler))
		wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct kref kref;
	struct fw_packet response;
	u32 request_header[ASYNC_HEADER_QUADLET_COUNT];
	int ack;
	u32 timestamp;
	u32 length;
	u32 data[];
};

void fw_request_get(struct fw_request *request)
{
	kref_get(&request->kref);
}

static void release_request(struct kref *kref)
{
	struct fw_request *request = container_of(kref, struct fw_request, kref);

	kfree(request);
}

void fw_request_put(struct fw_request *request)
{
	kref_put(&request->kref, release_request);
}

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request = container_of(packet, struct fw_request, response);

	trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
					       packet->speed, status, packet->timestamp);

	// Decrease the reference count since the packet is no longer in flight.
	fw_request_put(request);

	// Decrease the reference count to release the object.
	fw_request_put(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = async_header_get_tcode(r->request_header);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = async_header_get_data_length(r->request_header);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = async_header_get_extended_tcode(r->request_header);
		data_length = async_header_get_data_length(r->request_header);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = async_header_get_tcode(request_header);
	tlabel = async_header_get_tlabel(request_header);
	source = async_header_get_destination(request_header);	// Exchange.
	destination = async_header_get_source(request_header);	// Exchange.
	extended_tcode = async_header_get_extended_tcode(request_header);

	async_header_set_retry(response->header, RETRY_1);
	async_header_set_tlabel(response->header, tlabel);
	async_header_set_destination(response->header, destination);
	async_header_set_source(response->header, source);
	async_header_set_rcode(response->header, rcode);
	response->header[2] = 0;	// The field is reserved.

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			async_header_set_quadlet_data(response->header, *(u32 *)payload);
		else
			async_header_set_quadlet_data(response->header, 0);
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		async_header_set_tcode(response->header, tcode + 2);
		async_header_set_data_length(response->header, length);
		async_header_set_extended_tcode(response->header, extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}
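
/*
 * Worked example (illustrative only, not part of the driver): with the default
 * 100 ms split timeout, card->split_timeout_cycles is 800. For a request
 * timestamp whose 3-bit second field is 2 and whose cycle count is 7500, the
 * sum 800 + 7500 = 8300 cycles carries into the next second, so the computed
 * expiration timestamp has second field 3 and cycle count 8300 % 8000 = 300.
 */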

static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = async_header_get_tcode(p->header);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = async_header_get_data_length(p->header);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = async_header_get_data_length(p->header);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			 p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;
	kref_init(&request->kref);

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->timestamp = p->timestamp;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

/**
 * fw_send_response() - send response packet for asynchronous transaction.
 * @card: interface to send the response at.
 * @request: firewire request data for the transaction.
 * @rcode: response code to send.
 *
 * Submit a response packet into the asynchronous response transmission queue. The @request
 * is going to be released when the transmission successfully finishes later.
 */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	u32 *data = NULL;
	unsigned int data_length = 0;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header)) {
		fw_request_put(request);
		return;
	}

	if (rcode == RCODE_COMPLETE) {
		data = request->data;
		data_length = fw_get_response_length(request);
	}

	fw_fill_response(&request->response, request->request_header, rcode, data, data_length);

	// Increase the reference count so that the object is kept while the response is in flight.
	fw_request_get(request);

	trace_async_response_outbound_initiate((uintptr_t)request, card->index,
					       request->response.generation, request->response.speed,
					       request->response.header, data,
					       data ? data_length / 4 : 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
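
/*
 * Illustrative sketch (not part of the driver): a read-only address handler
 * that serves data out of a local buffer. As in handle_topology_map() later in
 * this file, the response payload is written into the buffer passed to the
 * callback before fw_send_response() is called. The names, the backing store
 * and its size are assumptions for the example.
 */
#if 0
static u8 example_backing_store[0x100];

static void example_read_only_callback(struct fw_card *card, struct fw_request *request,
				       int tcode, int destination, int source,
				       int generation, unsigned long long offset,
				       void *payload, size_t length, void *callback_data)
{
	unsigned long long index = offset & 0xff;	/* offset on the bus is absolute */

	if (!tcode_is_read_request(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if (index + length > sizeof(example_backing_store)) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	/* Fill the response payload, then queue the response. */
	memcpy(payload, example_backing_store + index, length);
	fw_send_response(card, request, RCODE_COMPLETE);
}
#endif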

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

/**
 * fw_request_get_timestamp: Get timestamp of the request.
 * @request: The opaque pointer to request structure.
 *
 * Get timestamp when 1394 OHCI controller receives the asynchronous request subaction. The
 * timestamp consists of the low order 3 bits of second field and the full 13 bits of count
 * field of isochronous cycle time register.
 *
 * Returns: timestamp of the request.
 */
u32 fw_request_get_timestamp(const struct fw_request *request)
{
	return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);

static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);
	tcode = async_header_get_tcode(p->header);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + async_header_get_extended_tcode(p->header);

	scoped_guard(rcu) {
		handler = lookup_enclosing_address_handler(&address_handler_list, offset,
							   request->length);
		if (handler)
			get_address_handler(handler);
	}

	if (!handler) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	// Outside the RCU read-side critical section. Without spinlock. With reference count.
	handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
				  request->data, request->length, handler->callback_data);
	put_address_handler(handler);
}

// To use kmalloc allocator efficiently, this should be power of two.
#define BUFFER_ON_KERNEL_STACK_SIZE	4

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
	struct fw_address_handler *handler, **handlers;
	int tcode, destination, source, i, count, buffer_size;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode = async_header_get_tcode(p->header);
	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	count = 0;
	handlers = buffer_on_kernel_stack;
	buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
	scoped_guard(rcu) {
		list_for_each_entry_rcu(handler, &address_handler_list, link) {
			if (is_enclosing_handler(handler, offset, request->length)) {
				if (count >= buffer_size) {
					int next_size = buffer_size * 2;
					struct fw_address_handler **buffer_on_kernel_heap;

					if (handlers == buffer_on_kernel_stack)
						buffer_on_kernel_heap = NULL;
					else
						buffer_on_kernel_heap = handlers;

					buffer_on_kernel_heap =
						krealloc_array(buffer_on_kernel_heap, next_size,
							       sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
					// FCP is used for purposes unrelated to significant system
					// resources (e.g. storage or networking), so allocation
					// failures are not considered so critical.
					if (!buffer_on_kernel_heap)
						break;

					if (handlers == buffer_on_kernel_stack) {
						memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
						       sizeof(buffer_on_kernel_stack));
					}

					handlers = buffer_on_kernel_heap;
					buffer_size = next_size;
				}
				get_address_handler(handler);
				handlers[count++] = handler;
			}
		}
	}

	for (i = 0; i < count; ++i) {
		handler = handlers[i];
		handler->address_callback(card, request, tcode, destination, source,
					  p->generation, offset, request->data,
					  request->length, handler->callback_data);
		put_address_handler(handler);
	}

	if (handlers != buffer_on_kernel_stack)
		kfree(handlers);

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;
	unsigned int tcode;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	tcode = async_header_get_tcode(p->header);
	if (tcode_is_link_internal(tcode)) {
		trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
					p->header[1], p->header[2]);
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
				    p->ack, p->timestamp, p->header, request->data,
				    tcode_is_read_request(tcode) ? 0 : request->length / 4);

	offset = async_header_get_offset(p->header);

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);

}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL, *iter;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = async_header_get_tcode(p->header);
	tlabel = async_header_get_tlabel(p->header);
	source = async_header_get_source(p->header);
	rcode = async_header_get_rcode(p->header);

	// FIXME: sanity check packet: is the length correct, do tcode and addresses
	// match the transaction request queried later?
	//
	// For the tracepoints event, let us decode the header here against the concern.

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = async_header_get_data_length(p->header);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter->node_id == source && iter->tlabel == tlabel) {
				if (try_cancel_split_timeout(iter)) {
					list_del_init(&iter->link);
					card->tlabel_mask &= ~(1ULL << iter->tlabel);
					t = iter;
				}
				break;
			}
		}
	}

	trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
				     p->timestamp, p->header, data, data_length / 4);

	if (!t) {
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending. Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
					data_length, t->callback_data);
	}
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!tcode_is_read_request(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}
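
/*
 * Worked example (illustrative only, not part of the driver): SPLIT_TIMEOUT_HI
 * holds seconds and the top 13 bits of SPLIT_TIMEOUT_LO hold 1/8000ths of a
 * second. A write of hi = 0 and lo = 0x19000000 (800 << 19) therefore yields
 * 0 * 8000 + 800 = 800 cycles, which is the 100 ms minimum enforced by the
 * clamp above.
 */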

static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			guard(spinlock_irqsave)(&card->lock);

			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			guard(spinlock_irqsave)(&card->lock);

			card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests. If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <[email protected]>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	xa_destroy(&fw_device_xa);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);