GitHub Repository: torvalds/linux
Path: blob/master/net/caif/caif_socket.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
#include <linux/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>

MODULE_DESCRIPTION("ST-Ericsson CAIF modem protocol socket support (AF_CAIF)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);

/*
 * CAIF reuses the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
        CAIF_CONNECTED = TCP_ESTABLISHED,
        CAIF_CONNECTING = TCP_SYN_SENT,
        CAIF_DISCONNECTED = TCP_CLOSE
};

#define TX_FLOW_ON_BIT 1
#define RX_FLOW_ON_BIT 2

struct caifsock {
        struct sock sk; /* must be first member */
        struct cflayer layer;
        unsigned long flow_state;
        struct caif_connect_request conn_req;
        struct mutex readlock;
        struct dentry *debugfs_socket_dir;
        int headroom, tailroom, maxframe;
};

static int rx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static int tx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void set_rx_flow_on(struct caifsock *cf_sk)
{
        set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void set_tx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void set_tx_flow_on(struct caifsock *cf_sk)
{
        set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void caif_read_lock(struct sock *sk)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_lock(&cf_sk->readlock);
}

static void caif_read_unlock(struct sock *sk)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_unlock(&cf_sk->readlock);
}

static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
        /* A quarter of the full buffer is used as the low water mark */
        return cf_sk->sk.sk_rcvbuf / 4;
}

static void caif_flow_ctrl(struct sock *sk, int mode)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
                cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}

/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so that packets
 * are not dropped; CAIF sends flow off instead.
 */
static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        bool queued = false;

        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
                net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
                                    atomic_read(&cf_sk->sk.sk_rmem_alloc),
                                    sk_rcvbuf_lowwater(cf_sk));
                set_rx_flow_off(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }

        err = sk_filter(sk, skb);
        if (err)
                goto out;

        if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
                net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
        spin_lock_irqsave(&list->lock, flags);
        queued = !sock_flag(sk, SOCK_DEAD);
        if (queued)
                __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);
out:
        if (queued)
                sk->sk_data_ready(sk);
        else
                kfree_skb(skb);
}
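
/*
 * Note: unlike sock_queue_rcv_skb(), an over-limit packet is still
 * queued above; crossing sk_rcvbuf only triggers a flow-off request
 * towards the modem. The skb is freed only when the socket is already
 * dead or the packet is rejected by sk_filter().
 */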

/* Packet Receive Callback function called from CAIF Stack */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
        struct caifsock *cf_sk;
        struct sk_buff *skb;

        cf_sk = container_of(layr, struct caifsock, layer);
        skb = cfpkt_tonative(pkt);

        if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
                kfree_skb(skb);
                return 0;
        }
        caif_queue_rcv_skb(&cf_sk->sk, skb);
        return 0;
}

static void cfsk_hold(struct cflayer *layr)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        sock_hold(&cf_sk->sk);
}

static void cfsk_put(struct cflayer *layr)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        sock_put(&cf_sk->sk);
}

/* Packet Control Callback function called from CAIF */
static void caif_ctrl_cb(struct cflayer *layr,
                         enum caif_ctrlcmd flow,
                         int phyid)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        switch (flow) {
        case CAIF_CTRLCMD_FLOW_ON_IND:
                /* OK from modem to start sending again */
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_FLOW_OFF_IND:
                /* Modem asks us to shut up */
                set_tx_flow_off(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_INIT_RSP:
                /* We're now connected */
                caif_client_register_refcnt(&cf_sk->layer,
                                            cfsk_hold, cfsk_put);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_shutdown = 0;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_DEINIT_RSP:
                /* We're now disconnected */
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_INIT_FAIL_RSP:
                /* Connect request failed */
                cf_sk->sk.sk_err = ECONNREFUSED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                /*
                 * Socket "standards" seem to require POLLOUT to
                 * be set at connect failure.
                 */
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
                /* Modem has closed this connection, or device is down. */
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                cf_sk->sk.sk_err = ECONNRESET;
                set_rx_flow_on(cf_sk);
                sk_error_report(&cf_sk->sk);
                break;

        default:
                pr_debug("Unexpected flow command %d\n", flow);
        }
}

static void caif_check_flow_release(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (rx_flow_is_on(cf_sk))
                return;

        if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
                set_rx_flow_on(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
        }
}
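
/*
 * caif_queue_rcv_skb() and caif_check_flow_release() together form a
 * hysteresis loop: RX flow is switched off once sk_rmem_alloc reaches
 * sk_rcvbuf, and switched back on only after the receive queue drains
 * below a quarter of sk_rcvbuf (sk_rcvbuf_lowwater()).
 */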

/*
 * Copied from unix_dgram_recvmsg, but removed credit checks,
 * changed locking and address handling, and added MSG_TRUNC.
 */
static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m,
                               size_t len, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int ret;
        int copylen;

        ret = -EOPNOTSUPP;
        if (flags & MSG_OOB)
                goto read_error;

        skb = skb_recv_datagram(sk, flags, &ret);
        if (!skb)
                goto read_error;
        copylen = skb->len;
        if (len < copylen) {
                m->msg_flags |= MSG_TRUNC;
                copylen = len;
        }

        ret = skb_copy_datagram_msg(skb, 0, m, copylen);
        if (ret)
                goto out_free;

        ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
        skb_free_datagram(sk, skb);
        caif_check_flow_release(sk);
        return ret;

read_error:
        return ret;
}
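
/*
 * Datagram-style truncation semantics apply above: a too-short buffer
 * copies a truncated packet and sets MSG_TRUNC in msg_flags, while
 * passing MSG_TRUNC in flags makes the call return the full packet
 * length instead of the number of bytes copied.
 */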

/* Copied from unix_stream_wait_data, identical except for lock call. */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);
        lock_sock(sk);

        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                    sk->sk_err ||
                    sk->sk_state != CAIF_CONNECTED ||
                    sock_flag(sk, SOCK_DEAD) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current) ||
                    !timeo)
                        break;

                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                if (sock_flag(sk, SOCK_DEAD))
                        break;

                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        }

        finish_wait(sk_sleep(sk), &wait);
        release_sock(sk);
        return timeo;
}

/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 */
static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                               size_t size, int flags)
{
        struct sock *sk = sock->sk;
        int copied = 0;
        int target;
        int err = 0;
        long timeo;

        err = -EOPNOTSUPP;
        if (flags&MSG_OOB)
                goto out;

        /*
         * Lock the socket to prevent the queue from being reordered
         * while we sleep in memcpy_to_msg().
         */
        err = -EAGAIN;
        if (sk->sk_state == CAIF_CONNECTING)
                goto out;

        caif_read_lock(sk);
        target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

        do {
                int chunk;
                struct sk_buff *skb;

                lock_sock(sk);
                if (sock_flag(sk, SOCK_DEAD)) {
                        err = -ECONNRESET;
                        goto unlock;
                }
                skb = skb_dequeue(&sk->sk_receive_queue);
                caif_check_flow_release(sk);

                if (skb == NULL) {
                        if (copied >= target)
                                goto unlock;
                        /*
                         * POSIX 1003.1g mandates this order.
                         */
                        err = sock_error(sk);
                        if (err)
                                goto unlock;
                        err = -ECONNRESET;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                goto unlock;

                        err = -EPIPE;
                        if (sk->sk_state != CAIF_CONNECTED)
                                goto unlock;
                        if (sock_flag(sk, SOCK_DEAD))
                                goto unlock;

                        release_sock(sk);

                        err = -EAGAIN;
                        if (!timeo)
                                break;

                        caif_read_unlock(sk);

                        timeo = caif_stream_data_wait(sk, timeo);

                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
                        caif_read_lock(sk);
                        continue;
unlock:
                        release_sock(sk);
                        break;
                }
                release_sock(sk);
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_to_msg(msg, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
                }
                copied += chunk;
                size -= chunk;

                /* Mark read part of skb as used */
                if (!(flags & MSG_PEEK)) {
                        skb_pull(skb, chunk);

                        /* put the skb back if we didn't use it up. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }
                        kfree_skb(skb);

                } else {
                        /*
                         * It is questionable, see note in unix_dgram_recvmsg.
                         */
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        break;
                }
        } while (size);
        caif_read_unlock(sk);

out:
        return copied ? : err;
}

/*
 * Copied from sock.c:sock_wait_for_wmem, but changed to wait for
 * CAIF flow-on and sock_writeable().
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
                                  int wait_writeable, long timeo, int *err)
{
        struct sock *sk = &cf_sk->sk;
        DEFINE_WAIT(wait);
        for (;;) {
                *err = 0;
                if (tx_flow_is_on(cf_sk) &&
                    (!wait_writeable || sock_writeable(&cf_sk->sk)))
                        break;
                *err = -ETIMEDOUT;
                if (!timeo)
                        break;
                *err = -ERESTARTSYS;
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                *err = -ECONNRESET;
                if (sk->sk_shutdown & SHUTDOWN_MASK)
                        break;
                *err = -sk->sk_err;
                if (sk->sk_err)
                        break;
                *err = -EPIPE;
                if (cf_sk->sk.sk_state != CAIF_CONNECTED)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}

/*
 * Transmit an skb. The device may temporarily request re-transmission
 * by returning EAGAIN.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
                        int noblock, long timeo)
{
        struct cfpkt *pkt;

        pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
        memset(skb->cb, 0, sizeof(struct caif_payload_info));
        cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

        if (cf_sk->layer.dn == NULL) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
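
/*
 * Note that the noblock and timeo arguments are accepted but unused
 * here; blocking is handled by the callers, which wait in
 * caif_wait_for_flow_on() before calling transmit_skb().
 */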

/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int buffer_size;
        int ret = 0;
        struct sk_buff *skb = NULL;
        int noblock;
        long timeo;
        caif_assert(cf_sk);
        ret = sock_error(sk);
        if (ret)
                goto err;

        ret = -EOPNOTSUPP;
        if (msg->msg_flags&MSG_OOB)
                goto err;

        ret = -EOPNOTSUPP;
        if (msg->msg_namelen)
                goto err;

        noblock = msg->msg_flags & MSG_DONTWAIT;

        timeo = sock_sndtimeo(sk, noblock);
        timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
                                      1, timeo, &ret);

        if (ret)
                goto err;
        ret = -EPIPE;
        if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
            sock_flag(sk, SOCK_DEAD) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                goto err;

        /* Error if trying to write more than maximum frame size. */
        ret = -EMSGSIZE;
        if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
                goto err;

        buffer_size = len + cf_sk->headroom + cf_sk->tailroom;

        ret = -ENOMEM;
        skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);

        if (!skb || skb_tailroom(skb) < buffer_size)
                goto err;

        skb_reserve(skb, cf_sk->headroom);

        ret = memcpy_from_msg(skb_put(skb, len), msg, len);

        if (ret)
                goto err;
        ret = transmit_skb(skb, cf_sk, noblock, timeo);
        if (ret < 0)
                /* skb is already freed */
                return ret;

        return len;
err:
        kfree_skb(skb);
        return ret;
}

/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * removed permission handling, added waiting for flow on,
 * and other minor adaptations.
 */
static int caif_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
        long timeo;

        err = -EOPNOTSUPP;
        if (unlikely(msg->msg_flags&MSG_OOB))
                goto out_err;

        if (unlikely(msg->msg_namelen))
                goto out_err;

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

        if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
                goto pipe_err;

        while (sent < len) {

                size = len-sent;

                if (size > cf_sk->maxframe)
                        size = cf_sk->maxframe;

                /* If size is more than half of sndbuf, chop up message */
                if (size > ((sk->sk_sndbuf >> 1) - 64))
                        size = (sk->sk_sndbuf >> 1) - 64;

                if (size > SKB_MAX_ALLOC)
                        size = SKB_MAX_ALLOC;

                skb = sock_alloc_send_skb(sk,
                                          size + cf_sk->headroom +
                                          cf_sk->tailroom,
                                          msg->msg_flags&MSG_DONTWAIT,
                                          &err);
                if (skb == NULL)
                        goto out_err;

                skb_reserve(skb, cf_sk->headroom);
                /*
                 * If you pass two values to sock_alloc_send_skb()
                 * it tries to grab the large buffer with GFP_NOFS
                 * (which can fail easily), and if it fails grabs the
                 * fallback size buffer which is under a page and will
                 * succeed. [Alan]
                 */
                size = min_t(int, size, skb_tailroom(skb));

                err = memcpy_from_msg(skb_put(skb, size), msg, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
                }
                err = transmit_skb(skb, cf_sk,
                                   msg->msg_flags&MSG_DONTWAIT, timeo);
                if (err < 0)
                        /* skb is already freed */
                        goto pipe_err;

                sent += size;
        }

        return sent;

pipe_err:
        if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        err = -EPIPE;
out_err:
        return sent ? : err;
}

static int setsockopt(struct socket *sock, int lvl, int opt, sockptr_t ov,
                      unsigned int ol)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int linksel;

        if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
                return -ENOPROTOOPT;

        switch (opt) {
        case CAIFSO_LINK_SELECT:
                if (ol < sizeof(int))
                        return -EINVAL;
                if (lvl != SOL_CAIF)
                        goto bad_sol;
                if (copy_from_sockptr(&linksel, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.link_selector = linksel;
                release_sock(&cf_sk->sk);
                return 0;

        case CAIFSO_REQ_PARAM:
                if (lvl != SOL_CAIF)
                        goto bad_sol;
                if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
                        return -ENOPROTOOPT;
                lock_sock(&(cf_sk->sk));
                if (ol > sizeof(cf_sk->conn_req.param.data) ||
                    copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) {
                        release_sock(&cf_sk->sk);
                        return -EINVAL;
                }
                cf_sk->conn_req.param.size = ol;
                release_sock(&cf_sk->sk);
                return 0;

        default:
                return -ENOPROTOOPT;
        }

        return 0;
bad_sol:
        return -ENOPROTOOPT;
}
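
/*
 * Illustrative userspace sketch (an illustration only, not part of this
 * file), using the UAPI definitions from linux/caif/caif_socket.h. Both
 * options are accepted only while the socket is still SS_UNCONNECTED,
 * as enforced above:
 *
 *	int sel = CAIF_LINK_HIGH_BANDW;
 *
 *	if (setsockopt(fd, SOL_CAIF, CAIFSO_LINK_SELECT, &sel, sizeof(sel)))
 *		perror("setsockopt");
 */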

/*
 * caif_connect() - Connect a CAIF Socket
 * Copied and modified af_irda.c:irda_connect().
 *
 * Note: by consulting "errno", the user space caller may learn the cause
 * of the failure. Most of them are visible in the function; others may come
 * from subroutines called and are listed here:
 *  o -EAFNOSUPPORT: bad socket family or type.
 *  o -ESOCKTNOSUPPORT: bad socket type or protocol
 *  o -EINVAL: bad socket address, or CAIF link type
 *  o -ECONNREFUSED: remote end refused the connection.
 *  o -EINPROGRESS: connect request sent but timed out (or non-blocking)
 *  o -EISCONN: already connected.
 *  o -ETIMEDOUT: Connection timed out (send timeout)
 *  o -ENODEV: No link layer to send request
 *  o -ECONNRESET: Received Shutdown indication or lost link layer
 *  o -ENOMEM: Out of memory
 *
 * State Strategy:
 *  o sk_state: holds the CAIF_* protocol state; it is updated by
 *    caif_ctrl_cb.
 *  o sock->state: holds the SS_* socket state and is updated by connect and
 *    disconnect.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
                        int addr_len, int flags)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        long timeo;
        int err;
        int ifindex, headroom, tailroom;
        unsigned int mtu;
        struct net_device *dev;

        lock_sock(sk);

        err = -EINVAL;
        if (addr_len < offsetofend(struct sockaddr, sa_family))
                goto out;

        err = -EAFNOSUPPORT;
        if (uaddr->sa_family != AF_CAIF)
                goto out;

        switch (sock->state) {
        case SS_UNCONNECTED:
                /* Normal case, a fresh connect */
                caif_assert(sk->sk_state == CAIF_DISCONNECTED);
                break;
        case SS_CONNECTING:
                switch (sk->sk_state) {
                case CAIF_CONNECTED:
                        sock->state = SS_CONNECTED;
                        err = -EISCONN;
                        goto out;
                case CAIF_DISCONNECTED:
                        /* Reconnect allowed */
                        break;
                case CAIF_CONNECTING:
                        err = -EALREADY;
                        if (flags & O_NONBLOCK)
                                goto out;
                        goto wait_connect;
                }
                break;
        case SS_CONNECTED:
                caif_assert(sk->sk_state == CAIF_CONNECTED ||
                            sk->sk_state == CAIF_DISCONNECTED);
                if (sk->sk_shutdown & SHUTDOWN_MASK) {
                        /* Allow re-connect after SHUTDOWN_IND */
                        caif_disconnect_client(sock_net(sk), &cf_sk->layer);
                        caif_free_client(&cf_sk->layer);
                        break;
                }
                /* No reconnect on a seqpacket socket */
                err = -EISCONN;
                goto out;
        case SS_DISCONNECTING:
        case SS_FREE:
                caif_assert(1); /* Should never happen */
                break;
        }
        sk->sk_state = CAIF_DISCONNECTED;
        sock->state = SS_UNCONNECTED;
        sk_stream_kill_queues(&cf_sk->sk);

        err = -EINVAL;
        if (addr_len != sizeof(struct sockaddr_caif))
                goto out;

        memcpy(&cf_sk->conn_req.sockaddr, uaddr,
               sizeof(struct sockaddr_caif));

        /* Move to connecting socket, start sending Connect Requests */
        sock->state = SS_CONNECTING;
        sk->sk_state = CAIF_CONNECTING;

        /*
         * Check the priority value coming from the socket;
         * if it is out of range it will be adjusted.
         */
        if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
                cf_sk->conn_req.priority = CAIF_PRIO_MAX;
        else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
                cf_sk->conn_req.priority = CAIF_PRIO_MIN;
        else
                cf_sk->conn_req.priority = cf_sk->sk.sk_priority;

        /* ifindex = id of the interface. */
        cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;

        cf_sk->layer.receive = caif_sktrecv_cb;

        err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
                                  &cf_sk->layer, &ifindex, &headroom, &tailroom);

        if (err < 0) {
                cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                goto out;
        }

        err = -ENODEV;
        rcu_read_lock();
        dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
        if (!dev) {
                rcu_read_unlock();
                goto out;
        }
        cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
        mtu = dev->mtu;
        rcu_read_unlock();

        cf_sk->tailroom = tailroom;
        cf_sk->maxframe = mtu - (headroom + tailroom);
        if (cf_sk->maxframe < 1) {
                pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
                err = -ENODEV;
                goto out;
        }

        err = -EINPROGRESS;
wait_connect:

        if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
                goto out;

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        release_sock(sk);
        err = -ERESTARTSYS;
        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                        sk->sk_state != CAIF_CONNECTING, timeo);
        lock_sock(sk);
        if (timeo < 0)
                goto out; /* -ERESTARTSYS */

        err = -ETIMEDOUT;
        if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
                goto out;
        if (sk->sk_state != CAIF_CONNECTED) {
                sock->state = SS_UNCONNECTED;
                err = sock_error(sk);
                if (!err)
                        err = -ECONNREFUSED;
                goto out;
        }
        sock->state = SS_CONNECTED;
        err = 0;
out:
        release_sock(sk);
        return err;
}
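
/*
 * Illustrative userspace sketch (an illustration only, not part of this
 * file): connecting a seqpacket socket to the modem's AT channel using
 * struct sockaddr_caif from linux/caif/caif_socket.h:
 *
 *	struct sockaddr_caif addr = {
 *		.family = AF_CAIF,
 *		.u.at.type = CAIF_ATTYPE_PLAIN,
 *	};
 *	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);
 *
 *	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)))
 *		perror("AF_CAIF connect");
 */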

/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 */
static int caif_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (!sk)
                return 0;

        set_tx_flow_off(cf_sk);

        /*
         * Ensure that packets are not queued after this point in time.
         * caif_queue_rcv_skb checks SOCK_DEAD while holding the queue
         * lock; this ensures that no packets are queued once the sock
         * is dead.
         */
        spin_lock_bh(&sk->sk_receive_queue.lock);
        sock_set_flag(sk, SOCK_DEAD);
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        sock->sk = NULL;

        WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
        debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

        lock_sock(&(cf_sk->sk));
        sk->sk_state = CAIF_DISCONNECTED;
        sk->sk_shutdown = SHUTDOWN_MASK;

        caif_disconnect_client(sock_net(sk), &cf_sk->layer);
        cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
        wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP);

        sock_orphan(sk);
        sk_stream_kill_queues(&cf_sk->sk);
        release_sock(sk);
        sock_put(sk);
        return 0;
}

/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static __poll_t caif_poll(struct file *file,
                          struct socket *sock, poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        sock_poll_wait(file, sock, wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err)
                mask |= EPOLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP;

        /* readable? */
        if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;

        /*
         * We set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
        if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

        return mask;
}

static const struct proto_ops caif_seqpacket_ops = {
        .family = PF_CAIF,
        .owner = THIS_MODULE,
        .release = caif_release,
        .bind = sock_no_bind,
        .connect = caif_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
        .sendmsg = caif_seqpkt_sendmsg,
        .recvmsg = caif_seqpkt_recvmsg,
        .mmap = sock_no_mmap,
};

static const struct proto_ops caif_stream_ops = {
        .family = PF_CAIF,
        .owner = THIS_MODULE,
        .release = caif_release,
        .bind = sock_no_bind,
        .connect = caif_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
        .sendmsg = caif_stream_sendmsg,
        .recvmsg = caif_stream_recvmsg,
        .mmap = sock_no_mmap,
};
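
/*
 * The two proto_ops tables above share release/connect/poll and differ
 * only in sendmsg/recvmsg: SOCK_SEQPACKET preserves packet boundaries
 * (one CAIF frame per message), while SOCK_STREAM chops and coalesces
 * the same connection into a byte stream.
 */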

/* This function is called when a socket is finally destroyed. */
static void caif_sock_destructor(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        caif_assert(!refcount_read(&sk->sk_wmem_alloc));
        caif_assert(sk_unhashed(sk));
        caif_assert(!sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
                return;
        }
        sk_stream_kill_queues(&cf_sk->sk);
        WARN_ON_ONCE(sk->sk_forward_alloc);
        caif_free_client(&cf_sk->layer);
}

static int caif_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        struct sock *sk = NULL;
        struct caifsock *cf_sk = NULL;
        static struct proto prot = {.name = "PF_CAIF",
                .owner = THIS_MODULE,
                .obj_size = sizeof(struct caifsock),
                .useroffset = offsetof(struct caifsock, conn_req.param),
                .usersize = sizeof_field(struct caifsock, conn_req.param)
        };

        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
                return -EPERM;
        /*
         * The sock->type specifies the socket type to use.
         * The CAIF socket is a packet stream in the sense
         * that it is packet based. CAIF trusts the reliability
         * of the link; no resending is implemented.
         */
        if (sock->type == SOCK_SEQPACKET)
                sock->ops = &caif_seqpacket_ops;
        else if (sock->type == SOCK_STREAM)
                sock->ops = &caif_stream_ops;
        else
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= CAIFPROTO_MAX)
                return -EPROTONOSUPPORT;
        /*
         * Set the socket state to unconnected. The socket state
         * is not really used anywhere in net/core or socket.c, but
         * initializing it makes sure that sock->state is not left
         * uninitialized.
         */
        sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot, kern);
        if (!sk)
                return -ENOMEM;

        cf_sk = container_of(sk, struct caifsock, sk);

        /* Store the protocol */
        sk->sk_protocol = (unsigned char) protocol;

        /* Initialize default priority for well-known cases */
        switch (protocol) {
        case CAIFPROTO_AT:
                sk->sk_priority = TC_PRIO_CONTROL;
                break;
        case CAIFPROTO_RFM:
                sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
                break;
        default:
                sk->sk_priority = TC_PRIO_BESTEFFORT;
        }

        /*
         * Lock in order to try to stop someone from opening the socket
         * too early.
         */
        lock_sock(&(cf_sk->sk));

        /* Initialize the nonzero default sock structure data. */
        sock_init_data(sock, sk);
        sk->sk_destruct = caif_sock_destructor;

        mutex_init(&cf_sk->readlock); /* single task reading lock */
        cf_sk->layer.ctrlcmd = caif_ctrl_cb;
        cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
        cf_sk->sk.sk_state = CAIF_DISCONNECTED;

        set_tx_flow_off(cf_sk);
        set_rx_flow_on(cf_sk);

        /* Set default options on configuration */
        cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
        cf_sk->conn_req.protocol = protocol;
        release_sock(&cf_sk->sk);
        return 0;
}

static const struct net_proto_family caif_family_ops = {
        .family = PF_CAIF,
        .create = caif_create,
        .owner = THIS_MODULE,
};

static int __init caif_sktinit_module(void)
{
        return sock_register(&caif_family_ops);
}

static void __exit caif_sktexit_module(void)
{
        sock_unregister(PF_CAIF);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);