/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"

static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]       = 0,
[RDS_EXTHDR_VERSION]    = sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]       = sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]  = sizeof(struct rds_ext_header_rdma_dest),
};
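
/*
 * Take an additional reference on a message. Released again with
 * rds_message_put().
 */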
void rds_message_addref(struct rds_message *rm)
{
        rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
        atomic_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
        unsigned long i;

        if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
                return;

        for (i = 0; i < rm->data.op_nents; i++) {
                rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
                /* XXX will have to put_page for page refs */
                __free_page(sg_page(&rm->data.op_sg[i]));
        }
        rm->data.op_nents = 0;

        if (rm->rdma.op_active)
                rds_rdma_free_op(&rm->rdma);
        if (rm->rdma.op_rdma_mr)
                rds_mr_put(rm->rdma.op_rdma_mr);

        if (rm->atomic.op_active)
                rds_atomic_free_op(&rm->atomic);
        if (rm->atomic.op_rdma_mr)
                rds_mr_put(rm->atomic.op_rdma_mr);
}
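
/*
 * Drop a reference on a message. The last reference purges and frees
 * the message; by then it must already be off its socket and
 * connection lists.
 */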
void rds_message_put(struct rds_message *rm)
{
        rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
        if (atomic_read(&rm->m_refcount) == 0) {
                printk(KERN_CRIT "danger refcount zero on %p\n", rm);
                WARN_ON(1);
        }
        if (atomic_dec_and_test(&rm->m_refcount)) {
                BUG_ON(!list_empty(&rm->m_sock_item));
                BUG_ON(!list_empty(&rm->m_conn_item));
                rds_message_purge(rm);

                kfree(rm);
        }
}
EXPORT_SYMBOL_GPL(rds_message_put);
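
/*
 * Fill in the basics of an outgoing wire header: no flags, the given
 * ports and sequence number, and an empty extension header area.
 */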
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
                                 __be16 dport, u64 seq)
{
        hdr->h_flags = 0;
        hdr->h_sport = sport;
        hdr->h_dport = dport;
        hdr->h_sequence = cpu_to_be64(seq);
        hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);
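
/*
 * Append a single extension header to hdr and re-terminate the
 * extension area. Returns 1 on success, or 0 if the type is unknown,
 * the length doesn't match the type, the extension doesn't fit, or an
 * extension is already present. See rds_message_add_rdma_dest_extension()
 * below for a typical caller.
 */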
int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
                              const void *data, unsigned int len)
{
        unsigned int ext_len = sizeof(u8) + len;
        unsigned char *dst;

        /* For now, refuse to add more than one extension header */
        if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
                return 0;

        if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
                return 0;

        if (ext_len >= RDS_HEADER_EXT_SPACE)
                return 0;
        dst = hdr->h_exthdr;

        *dst++ = type;
        memcpy(dst, data, len);

        dst[len] = RDS_EXTHDR_NONE;
        return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);

/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *      buflen = sizeof(buffer);
 *      type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *      if (type == RDS_EXTHDR_NONE)
 *              break;
 *      ...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
                               unsigned int *pos, void *buf, unsigned int *buflen)
{
        unsigned int offset, ext_type, ext_len;
        u8 *src = hdr->h_exthdr;

        offset = *pos;
        if (offset >= RDS_HEADER_EXT_SPACE)
                goto none;

        /* Get the extension type and length. For now, the
         * length is implied by the extension type. */
        ext_type = src[offset++];

        if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
                goto none;
        ext_len = rds_exthdr_size[ext_type];
        if (offset + ext_len > RDS_HEADER_EXT_SPACE)
                goto none;

        *pos = offset + ext_len;
        if (ext_len < *buflen)
                *buflen = ext_len;
        memcpy(buf, src + offset, *buflen);
        return ext_type;

none:
        *pos = RDS_HEADER_EXT_SPACE;
        *buflen = 0;
        return RDS_EXTHDR_NONE;
}
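
/*
 * Encode an rkey/offset pair as an RDS_EXTHDR_RDMA_DEST extension, in
 * big-endian wire order.
 */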
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
        struct rds_ext_header_rdma_dest ext_hdr;

        ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
        ext_hdr.h_rdma_offset = cpu_to_be32(offset);
        return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
/*
 * Each rds_message is allocated with extra space for the scatterlist
 * entries that rds ops will need. This minimizes the number of
 * allocations: each rds op can then grab SGs from the pool when
 * initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
        struct rds_message *rm;

        rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
        if (!rm)
                goto out;

        rm->m_used_sgs = 0;
        rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

        atomic_set(&rm->m_refcount, 1);
        INIT_LIST_HEAD(&rm->m_sock_item);
        INIT_LIST_HEAD(&rm->m_conn_item);
        spin_lock_init(&rm->m_rs_lock);
        init_waitqueue_head(&rm->m_flush_wait);

out:
        return rm;
}

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
        struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
        struct scatterlist *sg_ret;

        WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
        WARN_ON(!nents);

        if (rm->m_used_sgs + nents > rm->m_total_sgs)
                return NULL;

        sg_ret = &sg_first[rm->m_used_sgs];
        sg_init_table(sg_ret, nents);
        rm->m_used_sgs += nents;

        return sg_ret;
}
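
/*
 * Build a message around caller-provided pages: size the sg pool for
 * total_len, mark the message RDS_MSG_PAGEVEC so rds_message_purge()
 * leaves the (caller-owned) pages alone, and point the data op's
 * scatterlist at them. Returns the message or an ERR_PTR() on
 * allocation failure.
 */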
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
        struct rds_message *rm;
        unsigned int i;
        int num_sgs = ceil(total_len, PAGE_SIZE);
        int extra_bytes = num_sgs * sizeof(struct scatterlist);

        rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
        if (!rm)
                return ERR_PTR(-ENOMEM);

        set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
        rm->data.op_nents = ceil(total_len, PAGE_SIZE);
        rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
        if (!rm->data.op_sg) {
                rds_message_put(rm);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < rm->data.op_nents; ++i) {
                sg_set_page(&rm->data.op_sg[i],
                            virt_to_page(page_addrs[i]),
                            PAGE_SIZE, 0);
        }

        return rm;
}
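
/*
 * Copy a message payload in from userspace, allocating pages on demand
 * with rds_page_remainder_alloc(). Walks the user iovec and the data
 * op's scatterlist in lockstep. Returns 0 or a negative errno.
 */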
int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
                               size_t total_len)
{
        unsigned long to_copy;
        unsigned long iov_off;
        unsigned long sg_off;
        struct iovec *iov;
        struct scatterlist *sg;
        int ret = 0;

        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);

        /*
         * now allocate and copy in the data payload.
         */
        sg = rm->data.op_sg;
        iov = first_iov;
        iov_off = 0;
        sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

        while (total_len) {
                if (!sg_page(sg)) {
                        ret = rds_page_remainder_alloc(sg, total_len,
                                                       GFP_HIGHUSER);
                        if (ret)
                                goto out;
                        rm->data.op_nents++;
                        sg_off = 0;
                }

                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
                to_copy = min_t(size_t, to_copy, total_len);

                rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
                         "sg [%p, %u, %u] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         (void *)sg_page(sg), sg->offset, sg->length, sg_off);

                ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
                                              iov->iov_base + iov_off,
                                              to_copy);
                if (ret)
                        goto out;

                iov_off += to_copy;
                total_len -= to_copy;
                sg_off += to_copy;

                if (sg_off == sg->length)
                        sg++;
        }

out:
        return ret;
}
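
/*
 * Copy a received message's payload out to a user iovec. Copies at
 * most size bytes, and never more than the message length. Returns
 * the number of bytes copied, or a negative errno if the copy faulted.
 */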
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
                                 struct iovec *first_iov, size_t size)
{
        struct rds_message *rm;
        struct iovec *iov;
        struct scatterlist *sg;
        unsigned long to_copy;
        unsigned long iov_off;
        unsigned long vec_off;
        int copied;
        int ret;
        u32 len;

        rm = container_of(inc, struct rds_message, m_inc);
        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        iov = first_iov;
        iov_off = 0;
        sg = rm->data.op_sg;
        vec_off = 0;
        copied = 0;

        while (copied < size && copied < len) {
                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
                to_copy = min_t(size_t, to_copy, size - copied);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu from "
                         "sg [%p, %u, %u] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         sg_page(sg), sg->offset, sg->length, vec_off);

                ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
                                            iov->iov_base + iov_off,
                                            to_copy);
                if (ret) {
                        copied = ret;
                        break;
                }

                iov_off += to_copy;
                vec_off += to_copy;
                copied += to_copy;

                if (vec_off == sg->length) {
                        vec_off = 0;
                        sg++;
                }
        }

        return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
        wait_event_interruptible(rm->m_flush_wait,
                                 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}
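
/*
 * Called by the transport once it is done with the message's DMA
 * mappings; wakes anyone sleeping in rds_message_wait().
 */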
void rds_message_unmapped(struct rds_message *rm)
{
        clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
        wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);