GitHub Repository: awilliam/linux-vfio
Path: net/rxrpc/ar-call.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
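
/*
 * The state names above are padded to a fixed eight characters so that they
 * line up in columnar debugging output; they are printed by the _debug()
 * tracing in this file and by the /proc interface (see ar-proc.c).
 */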

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static unsigned rxrpc_call_max_lifetime = 60;	/* maximum lifetime of a call (seconds) */
static unsigned rxrpc_dead_call_timeout = 2;	/* time a released call lingers before final reaping (seconds) */

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
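
/*
 * These expiry handlers use the older kernel timer API: setup_timer() in
 * rxrpc_alloc_call() registers a callback of type void (*)(unsigned long)
 * and smuggles the call pointer through the unsigned long data argument,
 * which is why each handler at the bottom of this file begins by casting
 * _call back to a struct rxrpc_call pointer.
 */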

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	/* poison the as-yet-unlinked rb_node so premature tree use is caught */
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
	call->creation_jif = jiffies;
	return call;
}
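
/*
 * For reference, a minimal sketch of how rxrpc_call_jar is expected to be
 * created during module initialisation (the real kmem_cache_create() call
 * lives in af_rxrpc.c, not in this file, and may differ in detail):
 *
 *	rxrpc_call_jar = kmem_cache_create("rxrpc_call_jar",
 *					   sizeof(struct rxrpc_call), 0,
 *					   SLAB_HWCACHE_ALIGN, NULL);
 *	if (!rxrpc_call_jar)
 *		return -ENOMEM;
 */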

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct rxrpc_transport *trans,
						  struct rxrpc_conn_bundle *bundle,
						  gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		/* undo everything set up so far; freeing only the jar entry
		 * would leak the ACK window and the socket reference taken
		 * above */
		sock_put(&rx->sk);
		kfree(call->acks_window);
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}
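
/*
 * A note on the intended flow: userspace does not reach this allocator
 * directly.  A sendmsg() on an AF_RXRPC socket that names a new user call ID
 * arrives via rxrpc_get_client_call() below, which owns the user-ID index;
 * this helper only allocates the call and binds it to a connection slot
 * through rxrpc_connect_call().
 */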

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
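
/*
 * The double search above is the usual optimistic-insertion pattern for an
 * rb-tree protected by an rwlock: look up under the read lock first; on a
 * miss, allocate a candidate with no lock held (allocation may sleep), then
 * repeat the search under the write lock in case another thread inserted
 * the same user_call_ID in the meantime.  Losing the race costs one
 * allocation (discarded via rxrpc_put_call(candidate)), but the common case
 * never takes the write lock.
 */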

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			/* the candidate came from rxrpc_alloc_call(), so its
			 * ACK window must be freed along with it */
			kfree(candidate->acks_window);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
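
/*
 * Note the return conventions above: -EBUSY means the channel cannot take a
 * new call right now (or the candidate allocation failed), -ECONNABORTED
 * reports a duplicate packet for an aborted call, and -ECONNRESET flags
 * reuse of an old call number.  Callers distinguish these from a valid call
 * pointer with IS_ERR()/PTR_ERR().
 */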

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
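
/*
 * As with rxrpc_get_client_call(), a non-NULL return from
 * rxrpc_find_server_call() carries a reference taken by rxrpc_get_call();
 * the caller is responsible for balancing it with rxrpc_put_call() when
 * finished with the call.
 */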

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       ntohl(sp->hdr.serial),
			       ntohl(sp->hdr.seq));
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
	add_timer(&call->deadspan);

	_leave("");
}
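
/*
 * Lifetime note: rxrpc_release_call() does not drop the socket's reference
 * on the call directly; the reference is handed to the deadspan timer armed
 * just above.  When that timer fires (rxrpc_dead_call_timeout seconds
 * later), rxrpc_dead_call_expired() marks the call RXRPC_CALL_DEAD and
 * performs the final rxrpc_put_call() for that reference.
 */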

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}
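
/*
 * Note that rxrpc_mark_call_released() never processes the abort itself; it
 * only sets event bits and, at most once, queues the call's work item
 * (call->processor, which runs rxrpc_process_call()).  The
 * test_and_set_bit() calls make it idempotent, which matters because the
 * function below may run it against every call on a socket in one pass.
 */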

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
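
/*
 * __rxrpc_put_call() is the slow path behind the rxrpc_put_call() wrapper;
 * the get side is rxrpc_get_call().  Both wrappers live in ar-internal.h.
 * A rough sketch of the pairing (the exact definitions there may differ):
 *
 *	rxrpc_get_call(call);	// bumps call->usage
 *	... use the call ...
 *	rxrpc_put_call(call);	// drops usage; at zero, queues the destroyer
 *
 * The ASSERTCMP against RXRPC_CALL_DEAD above encodes the rule that the
 * last reference can only go away after rxrpc_dead_call_expired() has run.
 */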

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			/* the bottom bit of each window slot is used as a
			 * flag, so mask it off to recover the skb pointer */
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
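
/*
 * The Tx window drain above is a standard CIRC_CNT consumer loop from
 * <linux/circ_buf.h>: acks_winsz is a power of two (16 here), so advancing
 * the tail is a masked increment.  The same shape, in the abstract:
 *
 *	while (CIRC_CNT(head, tail, size) > 0) {
 *		consume(buffer[tail]);
 *		tail = (tail + 1) & (size - 1);
 *	}
 */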

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
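
/*
 * The unlock/cond_resched()/relock dance in the loop above keeps this
 * module-exit reaper from monopolising the CPU (and rxrpc_call_lock) when a
 * large number of calls remain on rxrpc_calls; list_del_init() has already
 * detached the current entry, so dropping the lock mid-walk is safe.
 */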

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}
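
/*
 * All three expiry handlers follow the same pattern: do no real work in
 * timer (softirq) context, just set an event bit and kick the call's work
 * item, leaving rxrpc_process_call() to act on it.  The resend handler is
 * the odd one out: per its header comment it must avoid call->state_lock,
 * which implies that somewhere in the Tx path the resend timer is deleted
 * with del_timer_sync() while that lock is held; a handler spinning on the
 * lock at that moment would never finish, and del_timer_sync() would wait
 * on it forever.
 */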