Path: blob/master/net/sunrpc/xprtrdma/svc_rdma_sendto.c
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <[email protected]>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
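/*
 * Worked example (illustrative sizes only): a reply with a 120-byte
 * head, 8192 bytes of page data starting at page_base 0, and a
 * 16-byte tail maps to SGE[0] for the RPC-over-RDMA header, SGE[1]
 * for the head, SGE[2] and SGE[3] for the two pages, and SGE[4] for
 * the tail, five SGEs in all.
 */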
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
			struct xdr_buf *xdr,
			struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no = 0;
	u8 *frva;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = svc_rdma_get_frmr(xprt);
	if (IS_ERR(frmr))
		return -ENOMEM;
	vec->frmr = frmr;

	/* Skip the RPCRDMA header */
	sge_no = 1;

	/* Map the head. */
	frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	vec->count = 2;
	sge_no++;

	/* Map the XDR head */
	frmr->kva = frva;
	frmr->direction = DMA_TO_DEVICE;
	frmr->access_flags = 0;
	frmr->map_len = PAGE_SIZE;
	frmr->page_list_len = 1;
	page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
	frmr->page_list->page_list[page_no] =
		ib_dma_map_page(xprt->sc_cm_id->device,
				virt_to_page(xdr->head[0].iov_base),
				page_off,
				PAGE_SIZE - page_off,
				DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
				 frmr->page_list->page_list[page_no]))
		goto fatal_err;
	atomic_inc(&xprt->sc_dma_used);

	/* Map the XDR page list */
	page_off = xdr->page_base;
	page_bytes = xdr->page_len + page_off;
	if (!page_bytes)
		goto encode_tail;

	/* Map the pages */
	vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
	vec->sge[sge_no].iov_len = page_bytes;
	sge_no++;
	while (page_bytes) {
		struct page *page;

		page = xdr->pages[page_no++];
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_page(xprt->sc_cm_id->device,
					page, page_off,
					sge_bytes, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;

		atomic_inc(&xprt->sc_dma_used);
		page_off = 0; /* reset for next time through loop */
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}
	vec->count++;

 encode_tail:
	/* Map tail */
	if (0 == xdr->tail[0].iov_len)
		goto done;

	vec->count++;
	vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

	if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
	    ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
		/*
		 * If head and tail use the same page, we don't need
		 * to map it again.
		 */
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
	} else {
		void *va;

		/* Map another page for the tail */
		page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
		va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
					page_off,
					PAGE_SIZE,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;
		atomic_inc(&xprt->sc_dma_used);
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}

 done:
	if (svc_rdma_fastreg(xprt, frmr))
		goto fatal_err;

	return 0;

 fatal_err:
	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
	vec->frmr = NULL;
	svc_rdma_put_frmr(xprt, frmr);
	return -EIO;
}

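/* Build the request map describing the RPC reply in vec: one kvec for
 * the head, one per page of page data, and one for the tail, leaving
 * sge[0] free for the RPC-over-RDMA header. When the transport is
 * using fast registration (sc_frmr_pg_list_len is non-zero) the work
 * is delegated to fast_reg_xdr() above.
 */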
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	BUG_ON(xdr->len !=
	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

	if (xprt->sc_frmr_pg_list_len)
		return fast_reg_xdr(xprt, xdr, vec);

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

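/* Map one stretch of the reply's xdr_buf for DMA. xdr_off is a byte
 * offset into the logical XDR stream; it is translated into the page
 * backing the head, the page list, or the tail, and a DMA mapping of
 * at most PAGE_SIZE bytes of that page is returned.
 */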
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	BUG_ON(vec->count > RPCSVC_MAXPAGES);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
				  bc, vec->sge[xdr_sge_no].iov_len - sge_off);
		sge[sge_no].length = sge_bytes;
		if (!vec->frmr) {
			sge[sge_no].addr =
				dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
					    sge_bytes, DMA_TO_DEVICE);
			xdr_off += sge_bytes;
			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
						 sge[sge_no].addr))
				goto err;
			atomic_inc(&xprt->sc_dma_used);
			sge[sge_no].lkey = xprt->sc_dma_lkey;
		} else {
			sge[sge_no].addr = (unsigned long)
				vec->sge[xdr_sge_no].iov_base + sge_off;
			sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->count++;
		ctxt->frmr = vec->frmr;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		BUG_ON(xdr_sge_no > vec->count);
		bc -= sge_bytes;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr))
		goto err;
	return 0;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(xprt, vec->frmr);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

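/* Send the pagelist and tail of the RPC reply into the client-provided
 * write chunks with RDMA Writes, and encode the chunks actually used
 * into the reply's write list. Each chunk is pushed in pieces of at
 * most max_write bytes; for example (illustrative numbers only), a
 * 10000-byte chunk with a max_write of 4096 would go out as three
 * RDMA Writes of 4096, 4096 and 1808 bytes at chunk offsets 0, 4096
 * and 8192. If the client supplied no write chunks, nothing is sent
 * and 0 is returned; otherwise the pagelist plus tail length is
 * returned so the caller can shrink the inline byte count.
 */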
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, arg_ch->rs_length);

		/* Prepare the response chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(arg_ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;
			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 arg_ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

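/* Like send_write_chunks(), but for the RDMA_NOMSG case: the whole
 * RPC reply, starting at XDR offset zero, is pushed into the client's
 * reply chunk list and the reply array is encoded into the response
 * header. If the request carried no reply array this is a no-op that
 * returns 0; otherwise rq_res.len is returned, so the caller treats
 * the entire message as having been sent via RDMA.
 */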
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* xdr offset starts at RPC message */
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ch->rs_length);

		/* Prepare the reply chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	struct ib_send_wr inv_wr;
	int sge_no;
	int sge_bytes;
	int page_no;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d."
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_frmr(rdma, vec->frmr);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;
	ctxt->frmr = vec->frmr;
	if (vec->frmr)
		set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
				ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		int xdr_off = 0;
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		if (!vec->frmr) {
			ctxt->sge[sge_no].addr =
				dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
					    sge_bytes, DMA_TO_DEVICE);
			xdr_off += sge_bytes;
			if (ib_dma_mapping_error(rdma->sc_cm_id->device,
						 ctxt->sge[sge_no].addr))
				goto err;
			atomic_inc(&rdma->sc_dma_used);
			ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		} else {
			ctxt->sge[sge_no].addr = (unsigned long)
				vec->sge[sge_no].iov_base;
			ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->sge[sge_no].length = sge_bytes;
	}
	BUG_ON(byte_count != 0);

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;
	if (vec->frmr) {
		/* Prepare INVALIDATE WR */
		memset(&inv_wr, 0, sizeof inv_wr);
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED;
		inv_wr.ex.invalidate_rkey =
			vec->frmr->mr->lkey;
		send_wr.next = &inv_wr;
	}

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(rdma, vec->frmr);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

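/* The RPC-over-RDMA header is built later, in svc_rdma_sendto(), so
 * there is nothing for the generic svc code to prepend here.
 */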
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}