/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static int sysctl_rxrpc_max_qlen __read_mostly = 10;

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset */
__be32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (wq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	if (srx->transport.family != rx->proto)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		_debug("INET: %x @ %pI4",
		       ntohs(srx->transport.sin.sin_port),
		       &srx->transport.sin.sin_addr);
		if (srx->transport_len > 8)
			memset((void *)&srx->transport + 8, 0,
			       srx->transport_len - 8);
		break;

	case AF_INET6:
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
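
/*
 * Example (illustrative only, not part of this file): an address that would
 * pass rxrpc_validate_address() -- AF_RXRPC family, SOCK_DGRAM transport and
 * an IPv4 transport endpoint.  Field names follow struct sockaddr_rxrpc as
 * declared in the UAPI header <linux/rxrpc.h>.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 0,			// 0 = client-only use
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(srx.transport.sin),
 *		.transport.sin	= {
 *			.sin_family	= AF_INET,
 *			.sin_port	= htons(7000),	// arbitrary example port
 *			.sin_addr	= { .s_addr = htonl(INADDR_ANY) },
 *		},
 *	};
 */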

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
	struct sock *sk = sock->sk;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
	__be16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	lock_sock(&rx->sk);

	if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
		ret = -EINVAL;
		goto error_unlock;
	}

	memcpy(&rx->srx, srx, sizeof(rx->srx));

	/* find a local transport endpoint if we don't have one already */
	local = rxrpc_lookup_local(&rx->srx);
	if (IS_ERR(local)) {
		ret = PTR_ERR(local);
		goto error_unlock;
	}

	rx->local = local;
	if (srx->srx_service) {
		service_id = htons(srx->srx_service);
		write_lock_bh(&local->services_lock);
		list_for_each_entry(prx, &local->services, listen_link) {
			if (prx->service_id == service_id)
				goto service_in_use;
		}

		rx->service_id = service_id;
		list_add_tail(&rx->listen_link, &local->services);
		write_unlock_bh(&local->services_lock);

		rx->sk.sk_state = RXRPC_SERVER_BOUND;
	} else {
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	ret = -EADDRINUSE;
	write_unlock_bh(&local->services_lock);
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
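
/*
 * Userspace sketch (illustrative only, assuming the UAPI definitions from
 * <linux/rxrpc.h> and <sys/socket.h>): create an AF_RXRPC socket carried over
 * IPv4/UDP and bind it as the server for a service ID, which is the
 * srx_service != 0 branch above.  Error handling is omitted.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,			// example service ID
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(srx.transport.sin),
 *		.transport.sin	= {
 *			.sin_family	= AF_INET,
 *			.sin_port	= htons(7001),
 *		},
 *	};
 *	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *	bind(fd, (struct sockaddr *) &srx, sizeof(srx));
 *	listen(fd, 10);		// see rxrpc_listen() below
 */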

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_CLIENT_BOUND:
	case RXRPC_CLIENT_CONNECTED:
	default:
		ret = -EBUSY;
		break;
	case RXRPC_SERVER_BOUND:
		ASSERT(rx->local != NULL);
		sk->sk_max_ack_backlog = backlog;
		rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		ret = 0;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a transport by address
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
							struct sockaddr *addr,
							int addr_len, int flags,
							gfp_t gfp)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_peer *peer;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ASSERT(rx->local != NULL);
	ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

	if (rx->srx.transport_type != srx->transport_type)
		return ERR_PTR(-ESOCKTNOSUPPORT);
	if (rx->srx.transport.family != srx->transport.family)
		return ERR_PTR(-EAFNOSUPPORT);

	/* find a remote transport endpoint from the local one */
	peer = rxrpc_get_peer(srx, gfp);
	if (IS_ERR(peer))
		return ERR_CAST(peer);

	/* find a transport */
	trans = rxrpc_get_transport(rx->local, peer, gfp);
	rxrpc_put_peer(peer);
	_leave(" = %p", trans);
	return trans;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact (defaults to socket setting)
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 *
 * Allow a kernel service to begin a call on the nominated socket. This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate. The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;
	struct rxrpc_transport *trans;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	__be16 service_id;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	lock_sock(&rx->sk);

	if (srx) {
		trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
						sizeof(*srx), 0, gfp);
		if (IS_ERR(trans)) {
			call = ERR_CAST(trans);
			trans = NULL;
			goto out_notrans;
		}
	} else {
		trans = rx->trans;
		if (!trans) {
			call = ERR_PTR(-ENOTCONN);
			goto out_notrans;
		}
		atomic_inc(&trans->usage);
	}

	service_id = rx->service_id;
	if (srx)
		service_id = htons(srx->srx_service);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data)
		key = NULL; /* a no-security key */

	bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
	if (IS_ERR(bundle)) {
		call = ERR_CAST(bundle);
		goto out;
	}

	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
				     gfp);
	rxrpc_put_bundle(trans, bundle);
out:
	rxrpc_put_transport(trans);
out_notrans:
	release_sock(&rx->sk);
	_leave(" = %p", call);
	return call;
}

EXPORT_SYMBOL(rxrpc_kernel_begin_call);

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
	rxrpc_remove_user_ID(call->socket, call);
	rxrpc_put_call(call);
}

EXPORT_SYMBOL(rxrpc_kernel_end_call);
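
/*
 * Kernel-service sketch (illustrative only): begin a client call on an
 * already set-up AF_RXRPC socket and release it again once the exchange is
 * finished.  "my_sock", "my_srx", "my_key" and the call ID are assumptions
 * made for the example; the exported functions above are the real interface.
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_begin_call(my_sock, &my_srx, my_key,
 *				       (unsigned long) 0x1234, GFP_NOFS);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 *	// ... send the request and collect the reply ...
 *
 *	rxrpc_kernel_end_call(call);
 */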

/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket.  They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given. @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
					rxrpc_interceptor_t interceptor)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	_enter("");
	rx->interceptor = interceptor;
}

EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
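
/*
 * Kernel-service sketch (illustrative only): divert incoming messages to the
 * service's own handler instead of the socket's Rx queue.  The handler must
 * match rxrpc_interceptor_t as declared in <net/af_rxrpc.h>, must free the
 * skbs it is given, and runs with the receive queue lock held and softirqs
 * disabled, as noted above.  "my_sock" and "my_rx_interceptor" are
 * assumptions made for the example.
 *
 *	rxrpc_kernel_intercept_rx_messages(my_sock, my_rx_interceptor);
 */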

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct sock *sk = sock->sk;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		/* find a local transport endpoint if we don't have one already */
		ASSERTCMP(rx->local, ==, NULL);
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = srx->transport_type;
		rx->srx.transport_len = sizeof(sa_family_t);
		rx->srx.transport.family = srx->transport.family;
		local = rxrpc_lookup_local(&rx->srx);
		if (IS_ERR(local)) {
			release_sock(&rx->sk);
			return PTR_ERR(local);
		}
		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
	case RXRPC_CLIENT_BOUND:
		break;
	case RXRPC_CLIENT_CONNECTED:
		release_sock(&rx->sk);
		return -EISCONN;
	default:
		release_sock(&rx->sk);
		return -EBUSY; /* server sockets can't connect as well */
	}

	trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
					GFP_KERNEL);
	if (IS_ERR(trans)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(trans));
		return PTR_ERR(trans);
	}

	rx->trans = trans;
	rx->service_id = htons(srx->srx_service);
	rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

	release_sock(&rx->sk);
	return 0;
}
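
/*
 * Userspace sketch (illustrative only): "connect" a client socket, i.e. set a
 * default destination for subsequent calls; no packets are exchanged at this
 * point, as the comment above notes.  "fd" is assumed to have been created as
 * in the earlier bind sketch, and the address values are examples.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,			// example service ID
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(srx.transport.sin),
 *		.transport.sin	= {
 *			.sin_family	= AF_INET,
 *			.sin_port	= htons(7000),
 *			.sin_addr	= { .s_addr = htonl(0xc0a80001) }, // 192.168.0.1
 *		},
 *	};
 *
 *	connect(fd, (struct sockaddr *) &srx, sizeof(srx));
 */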

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
			 struct msghdr *m, size_t len)
{
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	trans = NULL;
	lock_sock(&rx->sk);

	if (m->msg_name) {
		ret = -EISCONN;
		trans = rxrpc_name_to_transport(sock, m->msg_name,
						m->msg_namelen, 0, GFP_KERNEL);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
	} else {
		trans = rx->trans;
		if (trans)
			atomic_inc(&trans->usage);
	}

	switch (rx->sk.sk_state) {
	case RXRPC_SERVER_LISTENING:
		if (!m->msg_name) {
			ret = rxrpc_server_sendmsg(iocb, rx, m, len);
			break;
		}
	case RXRPC_SERVER_BOUND:
	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name) {
			ret = -ENOTCONN;
			break;
		}
	case RXRPC_CLIENT_CONNECTED:
		ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
		break;
	default:
		ret = -ENOTCONN;
		break;
	}

out:
	release_sock(&rx->sk);
	if (trans)
		rxrpc_put_transport(trans);
	_leave(" = %d", ret);
	return ret;
}
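
/*
 * Userspace sketch (illustrative only): start a call and send its request
 * data in one sendmsg().  The user call ID travels in a SOL_RXRPC /
 * RXRPC_USER_CALL_ID control message (see <linux/rxrpc.h>); leaving MSG_MORE
 * clear ends the request phase with this packet.  "fd", "srx", "request" and
 * "request_len" are assumptions carried over from the earlier sketches.
 *
 *	unsigned long call_id = 1;
 *	char ctrl[CMSG_SPACE(sizeof(call_id))];
 *	struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *	struct msghdr msg = {
 *		.msg_name	= &srx,		// or NULL on a connect()ed socket
 *		.msg_namelen	= sizeof(srx),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	sendmsg(fd, &msg, 0);		// no MSG_MORE: request phase complete
 */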

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned min_sec_level;
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = get_user(min_sec_level,
				       (unsigned __user *) optval);
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
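
/*
 * Userspace sketch (illustrative only): the options handled above must all be
 * set before the socket is bound or connected.  For example, requiring
 * packet-level security on every call made through the socket (the level
 * constant is assumed to come from <linux/rxrpc.h>):
 *
 *	unsigned level = RXRPC_SECURITY_ENCRYPT;
 *
 *	setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
 *		   &level, sizeof(level));
 *
 * RXRPC_SECURITY_KEY takes the description string of the key to use for
 * outgoing calls, e.g.:
 *
 *	setsockopt(fd, SOL_RXRPC, RXRPC_SECURITY_KEY, "afs@EXAMPLE.COM", 15);
 */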

/*
 * permit an RxRPC socket to be polled
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	/* we support transport protocol UDP only */
	if (protocol != PF_INET)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_state = RXRPC_UNCONNECTED;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
	sk->sk_destruct = rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->proto = protocol;
	rx->calls = RB_ROOT;

	INIT_LIST_HEAD(&rx->listen_link);
	INIT_LIST_HEAD(&rx->secureq);
	INIT_LIST_HEAD(&rx->acceptq);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	_leave(" = 0 [%p]", rx);
	return 0;
}
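
/*
 * Userspace sketch (illustrative only): the only combination accepted above
 * is a datagram socket in the initial network namespace with the transport
 * protocol given as PF_INET, i.e. RxRPC carried over IPv4/UDP:
 *
 *	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 */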

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

	if (!list_empty(&rx->listen_link)) {
		write_lock_bh(&rx->local->services_lock);
		list_del(&rx->listen_link);
		write_unlock_bh(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	if (rx->conn) {
		rxrpc_put_connection(rx->conn);
		rx->conn = NULL;
	}

	if (rx->bundle) {
		rxrpc_put_bundle(rx->trans, rx->bundle);
		rx->bundle = NULL;
	}
	if (rx->trans) {
		rxrpc_put_transport(rx->trans);
		rx->trans = NULL;
	}
	if (rx->local) {
		rxrpc_put_local(rx->local);
		rx->local = NULL;
	}

	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_header),
};

static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
	struct sk_buff *dummy_skb;
	int ret = -1;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));

	rxrpc_epoch = htonl(get_seconds());

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
	if (!rxrpc_workqueue) {
		printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
		goto error_key_type_s;
	}

#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops);
	proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops);
#endif
	return 0;

error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	rxrpc_destroy_all_calls();
	rxrpc_destroy_all_connections();
	rxrpc_destroy_all_transports();
	rxrpc_destroy_all_peers();
	rxrpc_destroy_all_locals();

	ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

	_debug("flush scheduled work");
	flush_workqueue(rxrpc_workqueue);
	proc_net_remove(&init_net, "rxrpc_conns");
	proc_net_remove(&init_net, "rxrpc_calls");
	destroy_workqueue(rxrpc_workqueue);
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);