GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/iucv/af_iucv.c
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <[email protected]>
 *		Hendrik Brueckner <[email protected]>
 *  PM functions:
 *		Ursula Braun <[email protected]>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)

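/*
 * Layout of skb->cb implied by the macros above (tag and class are both
 * 32-bit fields of struct iucv_message, so CB_TAG_LEN == CB_TRGCLS_LEN == 4):
 *
 *	skb->cb:  bytes 0..3  iucv message tag       (CB_TAG)
 *	          bytes 4..7  iucv msg target class  (CB_TRGCLS)
 */
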
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

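/*
 * Typical use of the wait helpers above (sketch; this mirrors the call made
 * in iucv_sock_close() further down): sleep until the socket reaches
 * IUCV_CLOSED or the timeout expires.
 *
 *	timeo = sk->sk_lingertime ? sk->sk_lingertime : IUCV_DISCONN_TIMEOUT;
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo);
 */
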
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev: AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev: AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_SEVERED:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = &iucv_bus,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

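/*
 * Worked example of the PRMDATA convention above: a message carrying five
 * bytes of socket data has PRMDATA[7] = 0xff - 5 = 0xfa, so iucv_msg_length()
 * returns 0xff - 0xfa = 5.  For the special iprm_shutdown message defined
 * above, PRMDATA[7] = 0x01, hence 0xff - 0x01 = 0xfe > 7 and the returned
 * length is capped at 8; such messages are treated as notifications rather
 * than as socket data (see iucv_process_message()).
 */
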
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:	sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

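/*
 * Example of the autobind naming scheme above: the first automatically bound
 * socket on a guest gets the 8-character name "00000001", the next one
 * "00000002", and so on; names that are already in use are skipped.
 */
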
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		err = -ECONNREFUSED;
	}

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

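/*
 * Sender-side counterpart of the PRMDATA convention: for a 5-byte payload,
 * prmdata[0..4] hold the socket data and prmdata[7] = 0xff - 5 = 0xfa.
 * iucv_sock_sendmsg() below only takes this path when the message fits into
 * 7 bytes and IUCV_IPRMDATA is enabled on both the path and the socket
 * (see the SO_IPRMDATA_MSG option in iucv_sock_setsockopt()).
 */
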
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
			break;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the limit of outstanding messages for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
			       " exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

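/*
 * Sketch of how an application can pick up the target class delivered by
 * recvmsg() above (user space; assumes the SOL_IUCV and SCM_IUCV_TRGCLS
 * constants are available to the application):
 *
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	char cbuf[CMSG_SPACE(sizeof(u32))];
 *	u32 trgcls = 0;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	... set up msg_iov, then call recvmsg(fd, &msg, 0) ...
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_IUCV &&
 *		    cmsg->cmsg_type == SCM_IUCV_TRGCLS)
 *			memcpy(&trgcls, CMSG_DATA(cmsg), sizeof(trgcls));
 */
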
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

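	/*
	 * Note on the how++ above: user space passes SHUT_RD (0), SHUT_WR (1)
	 * or SHUT_RDWR (2); the increment maps these onto the kernel bitmask
	 * values RCV_SHUTDOWN (1), SEND_SHUTDOWN (2) and SHUTDOWN_MASK (3)
	 * used below.
	 */
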
	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}

}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <[email protected]>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);