GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/9p/trans_fd.c
/*
 * net/9p/trans_fd.c
 *
 * Fd transport layer. Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <[email protected]>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <[email protected]>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <[email protected]>
 * Copyright (C) 1997-2002 by Ron Minnich <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR 2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */

struct p9_fd_opts {
        int rfd;
        int wfd;
        u16 port;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference to file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
        struct file *rd;
        struct file *wr;
        struct p9_conn *conn;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
        /* Options that take integer arguments */
        Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};

static const match_table_t tokens = {
        {Opt_port, "port=%u"},
        {Opt_rfdno, "rfdno=%u"},
        {Opt_wfdno, "wfdno=%u"},
        {Opt_err, NULL},
};
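
/*
 * Editorial note (not in the original source): the table above accepts
 * option strings of the form "port=<n>", "rfdno=<n>" and "wfdno=<n>",
 * e.g. "port=5640" for the tcp transport or "rfdno=3,wfdno=4" for the
 * fd transport; unknown options are silently skipped by parse_opts().
 */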

enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending   = 2,         /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending   = 8,         /* can write */
};
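
/*
 * Editorial note (not in the original source): these bits live in
 * p9_conn->wsched.  Rpending/Wpending record that poll() reported the
 * fd readable/writable; Rworksched/Wworksched guard against scheduling
 * a second read/write work item while one is already queued or running.
 */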

struct p9_poll_wait {
        struct p9_conn *conn;
        wait_queue_t wait;
        wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @req: current request being processed (if any)
 * @tmp_buf: temporary buffer to read in header
 * @rsize: amount to read for current frame
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: read/write work scheduling flags (Rpending/Wpending, Rworksched/Wworksched)
 *
 */

struct p9_conn {
        struct list_head mux_list;
        struct p9_client *client;
        int err;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct p9_req_t *req;
        char tmp_buf[7];
        int rsize;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        struct list_head poll_pending_link;
        struct p9_poll_wait poll_wait[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

static void p9_poll_workfn(struct work_struct *work);

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static DECLARE_WORK(p9_poll_work, p9_poll_workfn);

static void p9_mux_poll_stop(struct p9_conn *m)
{
        unsigned long flags;
        int i;

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                struct p9_poll_wait *pwait = &m->poll_wait[i];

                if (pwait->wait_addr) {
                        remove_wait_queue(pwait->wait_addr, &pwait->wait);
                        pwait->wait_addr = NULL;
                }
        }

        spin_lock_irqsave(&p9_poll_lock, flags);
        list_del_init(&m->poll_pending_link);
        spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
        struct p9_req_t *req, *rtmp;
        unsigned long flags;
        LIST_HEAD(cancel_list);

        P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

        spin_lock_irqsave(&m->client->lock, flags);

        if (m->err) {
                spin_unlock_irqrestore(&m->client->lock, flags);
                return;
        }

        m->err = err;

        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                req->status = REQ_STATUS_ERROR;
                if (!req->t_err)
                        req->t_err = err;
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                req->status = REQ_STATUS_ERROR;
                if (!req->t_err)
                        req->t_err = err;
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock_irqrestore(&m->client->lock, flags);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
                list_del(&req->req_list);
                p9_client_cb(m->client, req);
        }
}

static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
        int ret, n;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status == Connected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!ts->rd->f_op || !ts->rd->f_op->poll)
                return -EIO;

        if (!ts->wr->f_op || !ts->wr->f_op->poll)
                return -EIO;

        ret = ts->rd->f_op->poll(ts->rd, pt);
        if (ret < 0)
                return ret;

        if (ts->rd != ts->wr) {
                n = ts->wr->f_op->poll(ts->wr, pt);
                if (n < 0)
                        return n;
                ret = (ret & ~POLLOUT) | (n & ~POLLIN);
        }

        return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
        int ret;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status != Disconnected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!(ts->rd->f_flags & O_NONBLOCK))
                P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

        ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
                client->status = Disconnected;
        return ret;
}
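
/*
 * Editorial note (not in the original source): p9_read_work() below first
 * reads a 7-byte 9P header into tmp_buf; per the 9P wire format this is
 * size[4] (little-endian, includes the header itself), type[1] and tag[2].
 * The size at offset 0 and the tag at offset 5 are what the code extracts
 * to size the receive buffer and to look up the matching request.
 */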

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;

        m = container_of(work, struct p9_conn, rq);

        if (m->err < 0)
                return;

        P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rbuf) {
                m->rbuf = m->tmp_buf;
                m->rpos = 0;
                m->rsize = 7; /* start by reading header */
        }

        clear_bit(Rpending, &m->wsched);
        P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
                   m->rpos, m->rsize, m->rsize-m->rpos);
        err = p9_fd_read(m->client, m->rbuf + m->rpos,
                         m->rsize - m->rpos);
        P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;

        if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
                u16 tag;
                P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");

                n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
                if (n >= m->client->msize) {
                        P9_DPRINTK(P9_DEBUG_ERROR,
                                   "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
                P9_DPRINTK(P9_DEBUG_TRANS,
                           "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

                m->req = p9_tag_lookup(m->client, tag);
                if (!m->req || (m->req->status != REQ_STATUS_SENT &&
                                m->req->status != REQ_STATUS_FLSH)) {
                        P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
                                   tag);
                        err = -EIO;
                        goto error;
                }

                if (m->req->rc == NULL) {
                        m->req->rc = kmalloc(sizeof(struct p9_fcall) +
                                             m->client->msize, GFP_NOFS);
                        if (!m->req->rc) {
                                m->req = NULL;
                                err = -ENOMEM;
                                goto error;
                        }
                }
                m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
                memcpy(m->rbuf, m->tmp_buf, m->rsize);
                m->rsize = n;
        }

        /* not an else because some packets (like clunk) have no payload */
        if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
                P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
                spin_lock(&m->client->lock);
                if (m->req->status != REQ_STATUS_ERROR)
                        m->req->status = REQ_STATUS_RCVD;
                list_del(&m->req->req_list);
                spin_unlock(&m->client->lock);
                p9_client_cb(m->client, m->req);
                m->rbuf = NULL;
                m->rpos = 0;
                m->rsize = 0;
                m->req = NULL;
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = p9_fd_poll(m->client, NULL);

                if (n & POLLIN) {
                        P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;
error:
        p9_conn_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
        int ret;
        mm_segment_t oldfs;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status != Disconnected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!(ts->wr->f_flags & O_NONBLOCK))
                P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

        oldfs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
        set_fs(oldfs);

        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
                client->status = Disconnected;
        return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        struct p9_req_t *req;

        m = container_of(work, struct p9_conn, wq);

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->client->lock);
                req = list_entry(m->unsent_req_list.next, struct p9_req_t,
                                 req_list);
                req->status = REQ_STATUS_SENT;
                P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
                list_move_tail(&req->req_list, &m->req_list);

                m->wbuf = req->tc->sdata;
                m->wsize = req->tc->size;
                m->wpos = 0;
                spin_unlock(&m->client->lock);
        }

        P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
                   m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
        P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err < 0)
                goto error;
        else if (err == 0) {
                err = -EREMOTEIO;
                goto error;
        }

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = p9_fd_poll(m->client, NULL);

                if (n & POLLOUT) {
                        P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

error:
        p9_conn_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct p9_poll_wait *pwait =
                container_of(wait, struct p9_poll_wait, wait);
        struct p9_conn *m = pwait->conn;
        unsigned long flags;

        spin_lock_irqsave(&p9_poll_lock, flags);
        if (list_empty(&m->poll_pending_link))
                list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
        spin_unlock_irqrestore(&p9_poll_lock, flags);

        schedule_work(&p9_poll_work);
        return 1;
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by a file's poll operation to add the v9fs poll task to the
 * file's wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
        struct p9_conn *m = container_of(p, struct p9_conn, pt);
        struct p9_poll_wait *pwait = NULL;
        int i;

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                if (m->poll_wait[i].wait_addr == NULL) {
                        pwait = &m->poll_wait[i];
                        break;
                }
        }

        if (!pwait) {
                P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        pwait->conn = m;
        pwait->wait_addr = wait_address;
        init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
        add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
        int n;
        struct p9_conn *m;

        P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
                   client->msize);
        m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&m->mux_list);
        m->client = client;

        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        INIT_WORK(&m->rq, p9_read_work);
        INIT_WORK(&m->wq, p9_write_work);
        INIT_LIST_HEAD(&m->poll_pending_link);
        init_poll_funcptr(&m->pt, p9_pollwait);

        n = p9_fd_poll(client, &m->pt);
        if (n & POLLIN) {
                P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write work if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
        int n;

        if (m->err < 0)
                return;

        n = p9_fd_poll(m->client, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                p9_conn_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
                        schedule_work(&m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
                    !test_and_set_bit(Wworksched, &m->wsched)) {
                        P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
                        schedule_work(&m->wq);
                }
        }
}

/**
 * p9_fd_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 * @client: client instance
 * @req: request to be sent
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
        int n;
        struct p9_trans_fd *ts = client->trans;
        struct p9_conn *m = ts->conn;

        P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
                   current, req->tc, req->tc->id);
        if (m->err < 0)
                return m->err;

        spin_lock(&client->lock);
        req->status = REQ_STATUS_UNSENT;
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&client->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = p9_fd_poll(m->client, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                schedule_work(&m->wq);

        return 0;
}

static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
        int ret = 1;

        P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

        spin_lock(&client->lock);

        if (req->status == REQ_STATUS_UNSENT) {
                list_del(&req->req_list);
                req->status = REQ_STATUS_FLSHD;
                ret = 0;
        } else if (req->status == REQ_STATUS_SENT)
                req->status = REQ_STATUS_FLSH;

        spin_unlock(&client->lock);

        return ret;
}
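
/*
 * Editorial note (not in the original source): request status flow as used
 * by this transport: p9_fd_request() queues a request as REQ_STATUS_UNSENT,
 * p9_write_work() marks it REQ_STATUS_SENT when it starts transmitting,
 * p9_read_work() marks it REQ_STATUS_RCVD once the reply is read in, and
 * p9_fd_cancel() uses REQ_STATUS_FLSHD/REQ_STATUS_FLSH for flushed requests;
 * p9_conn_cancel() forces REQ_STATUS_ERROR on connection failure.
 */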

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
        char *options, *tmp_options;

        opts->port = P9_PORT;
        opts->rfd = ~0;
        opts->wfd = ~0;

        if (!params)
                return 0;

        tmp_options = kstrdup(params, GFP_KERNEL);
        if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                           "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
        options = tmp_options;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                int r;
                if (!*p)
                        continue;
                token = match_token(p, tokens, args);
                if (token != Opt_err) {
                        r = match_int(&args[0], &option);
                        if (r < 0) {
                                P9_DPRINTK(P9_DEBUG_ERROR,
                                           "integer field, but no integer?\n");
                                continue;
                        }
                }
                switch (token) {
                case Opt_port:
                        opts->port = option;
                        break;
                case Opt_rfdno:
                        opts->rfd = option;
                        break;
                case Opt_wfdno:
                        opts->wfd = option;
                        break;
                default:
                        continue;
                }
        }

        kfree(tmp_options);
        return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
        struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
                                         GFP_KERNEL);
        if (!ts)
                return -ENOMEM;

        ts->rd = fget(rfd);
        ts->wr = fget(wfd);
        if (!ts->rd || !ts->wr) {
                if (ts->rd)
                        fput(ts->rd);
                if (ts->wr)
                        fput(ts->wr);
                kfree(ts);
                return -EIO;
        }

        client->trans = ts;
        client->status = Connected;

        return 0;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
        struct p9_trans_fd *p;
        int ret, fd;

        p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        csocket->sk->sk_allocation = GFP_NOIO;
        fd = sock_map_fd(csocket, 0);
        if (fd < 0) {
                P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
                sock_release(csocket);
                kfree(p);
                return fd;
        }

        get_file(csocket->file);
        get_file(csocket->file);
        p->wr = p->rd = csocket->file;
        client->trans = p;
        client->status = Connected;

        sys_close(fd);  /* still racy */

        p->rd->f_flags |= O_NONBLOCK;

        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                ret = PTR_ERR(p->conn);
                p->conn = NULL;
                kfree(p);
                sockfd_put(csocket);
                sockfd_put(csocket);
                return ret;
        }
        return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
        P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
                   m->mux_list.prev, m->mux_list.next);

        p9_mux_poll_stop(m);
        cancel_work_sync(&m->rq);
        cancel_work_sync(&m->wq);

        p9_conn_cancel(m, -ECONNRESET);

        m->client = NULL;
        kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
        struct p9_trans_fd *ts;

        if (!client)
                return;

        ts = client->trans;
        if (!ts)
                return;

        client->status = Disconnected;

        p9_conn_destroy(ts->conn);

        if (ts->rd)
                fput(ts->rd);
        if (ts->wr)
                fput(ts->wr);

        kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
        int rc, count, in[4];

        rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
        if (rc != 4)
                return -EINVAL;
        for (count = 0; count < 4; count++) {
                if (in[count] > 255)
                        return -EINVAL;
        }
        return 0;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct socket *csocket;
        struct sockaddr_in sin_server;
        struct p9_fd_opts opts;

        err = parse_opts(args, &opts);
        if (err < 0)
                return err;

        if (valid_ipaddr4(addr) < 0)
                return -EINVAL;

        csocket = NULL;

        sin_server.sin_family = AF_INET;
        sin_server.sin_addr.s_addr = in_aton(addr);
        sin_server.sin_port = htons(opts.port);
        err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
                            SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
        if (err) {
                P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
                return err;
        }

        err = csocket->ops->connect(csocket,
                                    (struct sockaddr *)&sin_server,
                                    sizeof(struct sockaddr_in), 0);
        if (err < 0) {
                P9_EPRINTK(KERN_ERR,
                           "p9_trans_tcp: problem connecting socket to %s\n",
                           addr);
                sock_release(csocket);
                return err;
        }

        return p9_socket_open(client, csocket);
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct socket *csocket;
        struct sockaddr_un sun_server;

        csocket = NULL;

        if (strlen(addr) >= UNIX_PATH_MAX) {
                P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
                           addr);
                return -ENAMETOOLONG;
        }

        sun_server.sun_family = PF_UNIX;
        strcpy(sun_server.sun_path, addr);
        err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
                            SOCK_STREAM, 0, &csocket, 1);
        if (err < 0) {
                P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
                return err;
        }
        err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
                                    sizeof(struct sockaddr_un) - 1, 0);
        if (err < 0) {
                P9_EPRINTK(KERN_ERR,
                           "p9_trans_unix: problem connecting socket: %s: %d\n",
                           addr, err);
                sock_release(csocket);
                return err;
        }

        return p9_socket_open(client, csocket);
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct p9_fd_opts opts;
        struct p9_trans_fd *p;

        parse_opts(args, &opts);

        if (opts.rfd == ~0 || opts.wfd == ~0) {
                printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
                return -ENOPROTOOPT;
        }

        err = p9_fd_open(client, opts.rfd, opts.wfd);
        if (err < 0)
                return err;

        p = (struct p9_trans_fd *) client->trans;
        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                err = PTR_ERR(p->conn);
                p->conn = NULL;
                fput(p->rd);
                fput(p->wr);
                return err;
        }

        return 0;
}

static struct p9_trans_module p9_tcp_trans = {
        .name = "tcp",
        .maxsize = MAX_SOCK_BUF,
        .def = 1,
        .create = p9_fd_create_tcp,
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
        .owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
        .name = "unix",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create_unix,
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
        .owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
        .name = "fd",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create,
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
        .owner = THIS_MODULE,
};
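
/*
 * Illustrative usage (assumed, not part of this file; addresses and paths
 * are examples only): a v9fs mount picks one of the transports registered
 * above via the trans= option, e.g.
 *   mount -t 9p 192.168.0.2 /mnt -o trans=tcp,port=564
 *   mount -t 9p /tmp/9p.sock /mnt -o trans=unix
 *   mount -t 9p nodev /mnt -o trans=fd,rfdno=3,wfdno=4
 * "tcp" is the default transport here (.def = 1) when none is requested.
 */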

/**
 * p9_poll_workfn - poll work function
 * @work: poll work item
 *
 * polls all pending v9fs connections for new events and schedules the
 * appropriate read or write work
 *
 */

static void p9_poll_workfn(struct work_struct *work)
{
        unsigned long flags;

        P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);

        spin_lock_irqsave(&p9_poll_lock, flags);
        while (!list_empty(&p9_poll_pending_list)) {
                struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
                                                        struct p9_conn,
                                                        poll_pending_link);
                list_del_init(&conn->poll_pending_link);
                spin_unlock_irqrestore(&p9_poll_lock, flags);

                p9_poll_mux(conn);

                spin_lock_irqsave(&p9_poll_lock, flags);
        }
        spin_unlock_irqrestore(&p9_poll_lock, flags);

        P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
}

int p9_trans_fd_init(void)
{
        v9fs_register_trans(&p9_tcp_trans);
        v9fs_register_trans(&p9_unix_trans);
        v9fs_register_trans(&p9_fd_trans);

        return 0;
}

void p9_trans_fd_exit(void)
{
        flush_work_sync(&p9_poll_work);
        v9fs_unregister_trans(&p9_tcp_trans);
        v9fs_unregister_trans(&p9_unix_trans);
        v9fs_unregister_trans(&p9_fd_trans);
}