Path: blob/master/drivers/infiniband/hw/cxgb4/cq.c
15112 views
/*1* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.2*3* This software is available to you under a choice of one of two4* licenses. You may choose to be licensed under the terms of the GNU5* General Public License (GPL) Version 2, available from the file6* COPYING in the main directory of this source tree, or the7* OpenIB.org BSD license below:8*9* Redistribution and use in source and binary forms, with or10* without modification, are permitted provided that the following11* conditions are met:12*13* - Redistributions of source code must retain the above14* copyright notice, this list of conditions and the following15* disclaimer.16*17* - Redistributions in binary form must reproduce the above18* copyright notice, this list of conditions and the following19* disclaimer in the documentation and/or other materials20* provided with the distribution.21*22* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,23* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF24* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND25* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS26* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN27* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN28* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE29* SOFTWARE.30*/3132#include "iw_cxgb4.h"3334static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,35struct c4iw_dev_ucontext *uctx)36{37struct fw_ri_res_wr *res_wr;38struct fw_ri_res *res;39int wr_len;40struct c4iw_wr_wait wr_wait;41struct sk_buff *skb;42int ret;4344wr_len = sizeof *res_wr + sizeof *res;45skb = alloc_skb(wr_len, GFP_KERNEL);46if (!skb)47return -ENOMEM;48set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);4950res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);51memset(res_wr, 0, wr_len);52res_wr->op_nres = cpu_to_be32(53FW_WR_OP(FW_RI_RES_WR) |54V_FW_RI_RES_WR_NRES(1) |55FW_WR_COMPL(1));56res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));57res_wr->cookie = (unsigned long) &wr_wait;58res = res_wr->res;59res->u.cq.restype = FW_RI_RES_TYPE_CQ;60res->u.cq.op = FW_RI_RES_OP_RESET;61res->u.cq.iqid = cpu_to_be32(cq->cqid);6263c4iw_init_wr_wait(&wr_wait);64ret = c4iw_ofld_send(rdev, skb);65if (!ret) {66ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);67}6869kfree(cq->sw_queue);70dma_free_coherent(&(rdev->lldi.pdev->dev),71cq->memsize, cq->queue,72dma_unmap_addr(cq, mapping));73c4iw_put_cqid(rdev, cq->cqid, uctx);74return ret;75}7677static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,78struct c4iw_dev_ucontext *uctx)79{80struct fw_ri_res_wr *res_wr;81struct fw_ri_res *res;82int wr_len;83int user = (uctx != &rdev->uctx);84struct c4iw_wr_wait wr_wait;85int ret;86struct sk_buff *skb;8788cq->cqid = c4iw_get_cqid(rdev, uctx);89if (!cq->cqid) {90ret = -ENOMEM;91goto err1;92}9394if (!user) {95cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);96if (!cq->sw_queue) {97ret = -ENOMEM;98goto err2;99}100}101cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,102&cq->dma_addr, GFP_KERNEL);103if (!cq->queue) {104ret = -ENOMEM;105goto err3;106}107dma_unmap_addr_set(cq, mapping, cq->dma_addr);108memset(cq->queue, 0, cq->memsize);109110/* build fw_ri_res_wr */111wr_len = sizeof *res_wr + sizeof *res;112113skb = alloc_skb(wr_len, GFP_KERNEL);114if (!skb) {115ret = -ENOMEM;116goto err4;117}118set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);119120res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);121memset(res_wr, 0, wr_len);122res_wr->op_nres = cpu_to_be32(123FW_WR_OP(FW_RI_RES_WR) |124V_FW_RI_RES_WR_NRES(1) |125FW_WR_COMPL(1));126res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));127res_wr->cookie = (unsigned long) &wr_wait;128res = res_wr->res;129res->u.cq.restype = FW_RI_RES_TYPE_CQ;130res->u.cq.op = FW_RI_RES_OP_WRITE;131res->u.cq.iqid = cpu_to_be32(cq->cqid);132res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(133V_FW_RI_RES_WR_IQANUS(0) |134V_FW_RI_RES_WR_IQANUD(1) |135F_FW_RI_RES_WR_IQANDST |136V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));137res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(138F_FW_RI_RES_WR_IQDROPRSS |139V_FW_RI_RES_WR_IQPCIECH(2) |140V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |141F_FW_RI_RES_WR_IQO |142V_FW_RI_RES_WR_IQESIZE(1));143res->u.cq.iqsize = cpu_to_be16(cq->size);144res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);145146c4iw_init_wr_wait(&wr_wait);147148ret = c4iw_ofld_send(rdev, skb);149if (ret)150goto err4;151PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);152ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);153if (ret)154goto err4;155156cq->gen = 1;157cq->gts = rdev->lldi.gts_reg;158cq->rdev = rdev;159if (user) {160cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +161(cq->cqid << rdev->cqshift);162cq->ugts &= PAGE_MASK;163}164return 0;165err4:166dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,167dma_unmap_addr(cq, mapping));168err3:169kfree(cq->sw_queue);170err2:171c4iw_put_cqid(rdev, cq->cqid, uctx);172err1:173return ret;174}175176static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)177{178struct t4_cqe cqe;179180PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,181wq, cq, cq->sw_cidx, cq->sw_pidx);182memset(&cqe, 0, sizeof(cqe));183cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |184V_CQE_OPCODE(FW_RI_SEND) |185V_CQE_TYPE(0) |186V_CQE_SWCQE(1) |187V_CQE_QPID(wq->rq.qid));188cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));189cq->sw_queue[cq->sw_pidx] = cqe;190t4_swcq_produce(cq);191}192193int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)194{195int flushed = 0;196int in_use = wq->rq.in_use - count;197198BUG_ON(in_use < 0);199PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,200wq, cq, wq->rq.in_use, count);201while (in_use--) {202insert_recv_cqe(wq, cq);203flushed++;204}205return flushed;206}207208static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,209struct t4_swsqe *swcqe)210{211struct t4_cqe cqe;212213PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,214wq, cq, cq->sw_cidx, cq->sw_pidx);215memset(&cqe, 0, sizeof(cqe));216cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |217V_CQE_OPCODE(swcqe->opcode) |218V_CQE_TYPE(1) |219V_CQE_SWCQE(1) |220V_CQE_QPID(wq->sq.qid));221CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;222cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));223cq->sw_queue[cq->sw_pidx] = cqe;224t4_swcq_produce(cq);225}226227int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)228{229int flushed = 0;230struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];231int in_use = wq->sq.in_use - count;232233BUG_ON(in_use < 0);234while (in_use--) {235swsqe->signaled = 0;236insert_sq_cqe(wq, cq, swsqe);237swsqe++;238if (swsqe == (wq->sq.sw_sq + wq->sq.size))239swsqe = wq->sq.sw_sq;240flushed++;241}242return flushed;243}244245/*246* Move all CQEs from the HWCQ into the SWCQ.247*/248void c4iw_flush_hw_cq(struct t4_cq *cq)249{250struct t4_cqe *cqe = NULL, *swcqe;251int ret;252253PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);254ret = t4_next_hw_cqe(cq, &cqe);255while (!ret) {256PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",257__func__, cq->cidx, cq->sw_pidx);258swcqe = &cq->sw_queue[cq->sw_pidx];259*swcqe = *cqe;260swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));261t4_swcq_produce(cq);262t4_hwcq_consume(cq);263ret = t4_next_hw_cqe(cq, &cqe);264}265}266267static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)268{269if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)270return 0;271272if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))273return 0;274275if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))276return 0;277278if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))279return 0;280return 1;281}282283void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)284{285struct t4_cqe *cqe;286u32 ptr;287288*count = 0;289ptr = cq->sw_cidx;290while (ptr != cq->sw_pidx) {291cqe = &cq->sw_queue[ptr];292if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&293wq->sq.oldest_read)) &&294(CQE_QPID(cqe) == wq->sq.qid))295(*count)++;296if (++ptr == cq->size)297ptr = 0;298}299PDBG("%s cq %p count %d\n", __func__, cq, *count);300}301302void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)303{304struct t4_cqe *cqe;305u32 ptr;306307*count = 0;308PDBG("%s count zero %d\n", __func__, *count);309ptr = cq->sw_cidx;310while (ptr != cq->sw_pidx) {311cqe = &cq->sw_queue[ptr];312if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&313(CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))314(*count)++;315if (++ptr == cq->size)316ptr = 0;317}318PDBG("%s cq %p count %d\n", __func__, cq, *count);319}320321static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)322{323struct t4_swsqe *swsqe;324u16 ptr = wq->sq.cidx;325int count = wq->sq.in_use;326int unsignaled = 0;327328swsqe = &wq->sq.sw_sq[ptr];329while (count--)330if (!swsqe->signaled) {331if (++ptr == wq->sq.size)332ptr = 0;333swsqe = &wq->sq.sw_sq[ptr];334unsignaled++;335} else if (swsqe->complete) {336337/*338* Insert this completed cqe into the swcq.339*/340PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",341__func__, ptr, cq->sw_pidx);342swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));343cq->sw_queue[cq->sw_pidx] = swsqe->cqe;344t4_swcq_produce(cq);345swsqe->signaled = 0;346wq->sq.in_use -= unsignaled;347break;348} else349break;350}351352static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,353struct t4_cqe *read_cqe)354{355read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;356read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);357read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |358V_CQE_SWCQE(SW_CQE(hw_cqe)) |359V_CQE_OPCODE(FW_RI_READ_REQ) |360V_CQE_TYPE(1));361read_cqe->bits_type_ts = hw_cqe->bits_type_ts;362}363364/*365* Return a ptr to the next read wr in the SWSQ or NULL.366*/367static void advance_oldest_read(struct t4_wq *wq)368{369370u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;371372if (rptr == wq->sq.size)373rptr = 0;374while (rptr != wq->sq.pidx) {375wq->sq.oldest_read = &wq->sq.sw_sq[rptr];376377if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)378return;379if (++rptr == wq->sq.size)380rptr = 0;381}382wq->sq.oldest_read = NULL;383}384385/*386* poll_cq387*388* Caller must:389* check the validity of the first CQE,390* supply the wq assicated with the qpid.391*392* credit: cq credit to return to sge.393* cqe_flushed: 1 iff the CQE is flushed.394* cqe: copy of the polled CQE.395*396* return value:397* 0 CQE returned ok.398* -EAGAIN CQE skipped, try again.399* -EOVERFLOW CQ overflow detected.400*/401static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,402u8 *cqe_flushed, u64 *cookie, u32 *credit)403{404int ret = 0;405struct t4_cqe *hw_cqe, read_cqe;406407*cqe_flushed = 0;408*credit = 0;409ret = t4_next_cqe(cq, &hw_cqe);410if (ret)411return ret;412413PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"414" opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",415__func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),416CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),417CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),418CQE_WRID_LOW(hw_cqe));419420/*421* skip cqe's not affiliated with a QP.422*/423if (wq == NULL) {424ret = -EAGAIN;425goto skip_cqe;426}427428/*429* Gotta tweak READ completions:430* 1) the cqe doesn't contain the sq_wptr from the wr.431* 2) opcode not reflected from the wr.432* 3) read_len not reflected from the wr.433* 4) cq_type is RQ_TYPE not SQ_TYPE.434*/435if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {436437/*438* If this is an unsolicited read response, then the read439* was generated by the kernel driver as part of peer-2-peer440* connection setup. So ignore the completion.441*/442if (!wq->sq.oldest_read) {443if (CQE_STATUS(hw_cqe))444t4_set_wq_in_error(wq);445ret = -EAGAIN;446goto skip_cqe;447}448449/*450* Don't write to the HWCQ, so create a new read req CQE451* in local memory.452*/453create_read_req_cqe(wq, hw_cqe, &read_cqe);454hw_cqe = &read_cqe;455advance_oldest_read(wq);456}457458if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {459*cqe_flushed = t4_wq_in_error(wq);460t4_set_wq_in_error(wq);461goto proc_cqe;462}463464if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {465ret = -EAGAIN;466goto skip_cqe;467}468469/*470* RECV completion.471*/472if (RQ_TYPE(hw_cqe)) {473474/*475* HW only validates 4 bits of MSN. So we must validate that476* the MSN in the SEND is the next expected MSN. If its not,477* then we complete this with T4_ERR_MSN and mark the wq in478* error.479*/480481if (t4_rq_empty(wq)) {482t4_set_wq_in_error(wq);483ret = -EAGAIN;484goto skip_cqe;485}486if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {487t4_set_wq_in_error(wq);488hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));489goto proc_cqe;490}491goto proc_cqe;492}493494/*495* If we get here its a send completion.496*497* Handle out of order completion. These get stuffed498* in the SW SQ. Then the SW SQ is walked to move any499* now in-order completions into the SW CQ. This handles500* 2 cases:501* 1) reaping unsignaled WRs when the first subsequent502* signaled WR is completed.503* 2) out of order read completions.504*/505if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {506struct t4_swsqe *swsqe;507508PDBG("%s out of order completion going in sw_sq at idx %u\n",509__func__, CQE_WRID_SQ_IDX(hw_cqe));510swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];511swsqe->cqe = *hw_cqe;512swsqe->complete = 1;513ret = -EAGAIN;514goto flush_wq;515}516517proc_cqe:518*cqe = *hw_cqe;519520/*521* Reap the associated WR(s) that are freed up with this522* completion.523*/524if (SQ_TYPE(hw_cqe)) {525wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);526PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);527*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;528t4_sq_consume(wq);529} else {530PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);531*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;532BUG_ON(t4_rq_empty(wq));533t4_rq_consume(wq);534}535536flush_wq:537/*538* Flush any completed cqes that are now in-order.539*/540flush_completed_wrs(wq, cq);541542skip_cqe:543if (SW_CQE(hw_cqe)) {544PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",545__func__, cq, cq->cqid, cq->sw_cidx);546t4_swcq_consume(cq);547} else {548PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",549__func__, cq, cq->cqid, cq->cidx);550t4_hwcq_consume(cq);551}552return ret;553}554555/*556* Get one cq entry from c4iw and map it to openib.557*558* Returns:559* 0 cqe returned560* -ENODATA EMPTY;561* -EAGAIN caller must try again562* any other -errno fatal error563*/564static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)565{566struct c4iw_qp *qhp = NULL;567struct t4_cqe cqe = {0, 0}, *rd_cqe;568struct t4_wq *wq;569u32 credit = 0;570u8 cqe_flushed;571u64 cookie = 0;572int ret;573574ret = t4_next_cqe(&chp->cq, &rd_cqe);575576if (ret)577return ret;578579qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));580if (!qhp)581wq = NULL;582else {583spin_lock(&qhp->lock);584wq = &(qhp->wq);585}586ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);587if (ret)588goto out;589590wc->wr_id = cookie;591wc->qp = &qhp->ibqp;592wc->vendor_err = CQE_STATUS(&cqe);593wc->wc_flags = 0;594595PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "596"lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),597CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),598CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);599600if (CQE_TYPE(&cqe) == 0) {601if (!CQE_STATUS(&cqe))602wc->byte_len = CQE_LEN(&cqe);603else604wc->byte_len = 0;605wc->opcode = IB_WC_RECV;606if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||607CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {608wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);609wc->wc_flags |= IB_WC_WITH_INVALIDATE;610}611} else {612switch (CQE_OPCODE(&cqe)) {613case FW_RI_RDMA_WRITE:614wc->opcode = IB_WC_RDMA_WRITE;615break;616case FW_RI_READ_REQ:617wc->opcode = IB_WC_RDMA_READ;618wc->byte_len = CQE_LEN(&cqe);619break;620case FW_RI_SEND_WITH_INV:621case FW_RI_SEND_WITH_SE_INV:622wc->opcode = IB_WC_SEND;623wc->wc_flags |= IB_WC_WITH_INVALIDATE;624break;625case FW_RI_SEND:626case FW_RI_SEND_WITH_SE:627wc->opcode = IB_WC_SEND;628break;629case FW_RI_BIND_MW:630wc->opcode = IB_WC_BIND_MW;631break;632633case FW_RI_LOCAL_INV:634wc->opcode = IB_WC_LOCAL_INV;635break;636case FW_RI_FAST_REGISTER:637wc->opcode = IB_WC_FAST_REG_MR;638break;639default:640printk(KERN_ERR MOD "Unexpected opcode %d "641"in the CQE received for QPID=0x%0x\n",642CQE_OPCODE(&cqe), CQE_QPID(&cqe));643ret = -EINVAL;644goto out;645}646}647648if (cqe_flushed)649wc->status = IB_WC_WR_FLUSH_ERR;650else {651652switch (CQE_STATUS(&cqe)) {653case T4_ERR_SUCCESS:654wc->status = IB_WC_SUCCESS;655break;656case T4_ERR_STAG:657wc->status = IB_WC_LOC_ACCESS_ERR;658break;659case T4_ERR_PDID:660wc->status = IB_WC_LOC_PROT_ERR;661break;662case T4_ERR_QPID:663case T4_ERR_ACCESS:664wc->status = IB_WC_LOC_ACCESS_ERR;665break;666case T4_ERR_WRAP:667wc->status = IB_WC_GENERAL_ERR;668break;669case T4_ERR_BOUND:670wc->status = IB_WC_LOC_LEN_ERR;671break;672case T4_ERR_INVALIDATE_SHARED_MR:673case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:674wc->status = IB_WC_MW_BIND_ERR;675break;676case T4_ERR_CRC:677case T4_ERR_MARKER:678case T4_ERR_PDU_LEN_ERR:679case T4_ERR_OUT_OF_RQE:680case T4_ERR_DDP_VERSION:681case T4_ERR_RDMA_VERSION:682case T4_ERR_DDP_QUEUE_NUM:683case T4_ERR_MSN:684case T4_ERR_TBIT:685case T4_ERR_MO:686case T4_ERR_MSN_RANGE:687case T4_ERR_IRD_OVERFLOW:688case T4_ERR_OPCODE:689case T4_ERR_INTERNAL_ERR:690wc->status = IB_WC_FATAL_ERR;691break;692case T4_ERR_SWFLUSH:693wc->status = IB_WC_WR_FLUSH_ERR;694break;695default:696printk(KERN_ERR MOD697"Unexpected cqe_status 0x%x for QPID=0x%0x\n",698CQE_STATUS(&cqe), CQE_QPID(&cqe));699ret = -EINVAL;700}701}702out:703if (wq)704spin_unlock(&qhp->lock);705return ret;706}707708int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)709{710struct c4iw_cq *chp;711unsigned long flags;712int npolled;713int err = 0;714715chp = to_c4iw_cq(ibcq);716717spin_lock_irqsave(&chp->lock, flags);718for (npolled = 0; npolled < num_entries; ++npolled) {719do {720err = c4iw_poll_cq_one(chp, wc + npolled);721} while (err == -EAGAIN);722if (err)723break;724}725spin_unlock_irqrestore(&chp->lock, flags);726return !err || err == -ENODATA ? npolled : err;727}728729int c4iw_destroy_cq(struct ib_cq *ib_cq)730{731struct c4iw_cq *chp;732struct c4iw_ucontext *ucontext;733734PDBG("%s ib_cq %p\n", __func__, ib_cq);735chp = to_c4iw_cq(ib_cq);736737remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);738atomic_dec(&chp->refcnt);739wait_event(chp->wait, !atomic_read(&chp->refcnt));740741ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)742: NULL;743destroy_cq(&chp->rhp->rdev, &chp->cq,744ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);745kfree(chp);746return 0;747}748749struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,750int vector, struct ib_ucontext *ib_context,751struct ib_udata *udata)752{753struct c4iw_dev *rhp;754struct c4iw_cq *chp;755struct c4iw_create_cq_resp uresp;756struct c4iw_ucontext *ucontext = NULL;757int ret;758size_t memsize, hwentries;759struct c4iw_mm_entry *mm, *mm2;760761PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);762763rhp = to_c4iw_dev(ibdev);764765chp = kzalloc(sizeof(*chp), GFP_KERNEL);766if (!chp)767return ERR_PTR(-ENOMEM);768769if (ib_context)770ucontext = to_c4iw_ucontext(ib_context);771772/* account for the status page. */773entries++;774775/* IQ needs one extra entry to differentiate full vs empty. */776entries++;777778/*779* entries must be multiple of 16 for HW.780*/781entries = roundup(entries, 16);782783/*784* Make actual HW queue 2x to avoid cdix_inc overflows.785*/786hwentries = entries * 2;787788/*789* Make HW queue at least 64 entries so GTS updates aren't too790* frequent.791*/792if (hwentries < 64)793hwentries = 64;794795memsize = hwentries * sizeof *chp->cq.queue;796797/*798* memsize must be a multiple of the page size if its a user cq.799*/800if (ucontext) {801memsize = roundup(memsize, PAGE_SIZE);802hwentries = memsize / sizeof *chp->cq.queue;803while (hwentries > T4_MAX_IQ_SIZE) {804memsize -= PAGE_SIZE;805hwentries = memsize / sizeof *chp->cq.queue;806}807}808chp->cq.size = hwentries;809chp->cq.memsize = memsize;810811ret = create_cq(&rhp->rdev, &chp->cq,812ucontext ? &ucontext->uctx : &rhp->rdev.uctx);813if (ret)814goto err1;815816chp->rhp = rhp;817chp->cq.size--; /* status page */818chp->ibcq.cqe = entries - 2;819spin_lock_init(&chp->lock);820atomic_set(&chp->refcnt, 1);821init_waitqueue_head(&chp->wait);822ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);823if (ret)824goto err2;825826if (ucontext) {827mm = kmalloc(sizeof *mm, GFP_KERNEL);828if (!mm)829goto err3;830mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);831if (!mm2)832goto err4;833834uresp.qid_mask = rhp->rdev.cqmask;835uresp.cqid = chp->cq.cqid;836uresp.size = chp->cq.size;837uresp.memsize = chp->cq.memsize;838spin_lock(&ucontext->mmap_lock);839uresp.key = ucontext->key;840ucontext->key += PAGE_SIZE;841uresp.gts_key = ucontext->key;842ucontext->key += PAGE_SIZE;843spin_unlock(&ucontext->mmap_lock);844ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);845if (ret)846goto err5;847848mm->key = uresp.key;849mm->addr = virt_to_phys(chp->cq.queue);850mm->len = chp->cq.memsize;851insert_mmap(ucontext, mm);852853mm2->key = uresp.gts_key;854mm2->addr = chp->cq.ugts;855mm2->len = PAGE_SIZE;856insert_mmap(ucontext, mm2);857}858PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",859__func__, chp->cq.cqid, chp, chp->cq.size,860chp->cq.memsize,861(unsigned long long) chp->cq.dma_addr);862return &chp->ibcq;863err5:864kfree(mm2);865err4:866kfree(mm);867err3:868remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);869err2:870destroy_cq(&chp->rhp->rdev, &chp->cq,871ucontext ? &ucontext->uctx : &rhp->rdev.uctx);872err1:873kfree(chp);874return ERR_PTR(ret);875}876877int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)878{879return -ENOSYS;880}881882int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)883{884struct c4iw_cq *chp;885int ret;886unsigned long flag;887888chp = to_c4iw_cq(ibcq);889spin_lock_irqsave(&chp->lock, flag);890ret = t4_arm_cq(&chp->cq,891(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);892spin_unlock_irqrestore(&chp->lock, flag);893if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))894ret = 0;895return ret;896}897898899