/* drivers/infiniband/hw/cxgb3/cxio_wr.h */
/*1* Copyright (c) 2006 Chelsio, Inc. All rights reserved.2*3* This software is available to you under a choice of one of two4* licenses. You may choose to be licensed under the terms of the GNU5* General Public License (GPL) Version 2, available from the file6* COPYING in the main directory of this source tree, or the7* OpenIB.org BSD license below:8*9* Redistribution and use in source and binary forms, with or10* without modification, are permitted provided that the following11* conditions are met:12*13* - Redistributions of source code must retain the above14* copyright notice, this list of conditions and the following15* disclaimer.16*17* - Redistributions in binary form must reproduce the above18* copyright notice, this list of conditions and the following19* disclaimer in the documentation and/or other materials20* provided with the distribution.21*22* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,23* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF24* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND25* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS26* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN27* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN28* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE29* SOFTWARE.30*/31#ifndef __CXIO_WR_H__32#define __CXIO_WR_H__3334#include <asm/io.h>35#include <linux/pci.h>36#include <linux/timer.h>37#include "firmware_exports.h"3839#define T3_MAX_SGE 440#define T3_MAX_INLINE 6441#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)42#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)43#define T3_STAG0_PAGE_SHIFT 154445#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))46#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \47((rptr)!=(wptr)) )48#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))49#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))50#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))51#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))5253static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)54{55writel(((1<<31) | qpid), doorbell);56}5758#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))5960enum t3_wr_flags {61T3_COMPLETION_FLAG = 0x01,62T3_NOTIFY_FLAG = 0x02,63T3_SOLICITED_EVENT_FLAG = 0x04,64T3_READ_FENCE_FLAG = 0x08,65T3_LOCAL_FENCE_FLAG = 0x1066} __attribute__ ((packed));6768enum t3_wr_opcode {69T3_WR_BP = FW_WROPCODE_RI_BYPASS,70T3_WR_SEND = FW_WROPCODE_RI_SEND,71T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,72T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,73T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,74T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,75T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,76T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,77T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,78T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR79} __attribute__ ((packed));8081enum t3_rdma_opcode {82T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... 
*/83T3_READ_REQ,84T3_READ_RESP,85T3_SEND,86T3_SEND_WITH_INV,87T3_SEND_WITH_SE,88T3_SEND_WITH_SE_INV,89T3_TERMINATE,90T3_RDMA_INIT, /* CHELSIO RI specific ... */91T3_BIND_MW,92T3_FAST_REGISTER,93T3_LOCAL_INV,94T3_QP_MOD,95T3_BYPASS,96T3_RDMA_READ_REQ_WITH_INV,97} __attribute__ ((packed));9899static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)100{101switch (wrop) {102case T3_WR_BP: return T3_BYPASS;103case T3_WR_SEND: return T3_SEND;104case T3_WR_WRITE: return T3_RDMA_WRITE;105case T3_WR_READ: return T3_READ_REQ;106case T3_WR_INV_STAG: return T3_LOCAL_INV;107case T3_WR_BIND: return T3_BIND_MW;108case T3_WR_INIT: return T3_RDMA_INIT;109case T3_WR_QP_MOD: return T3_QP_MOD;110case T3_WR_FASTREG: return T3_FAST_REGISTER;111default: break;112}113return -1;114}115116117/* Work request id */118union t3_wrid {119struct {120u32 hi;121u32 low;122} id0;123u64 id1;124};125126#define WRID(wrid) (wrid.id1)127#define WRID_GEN(wrid) (wrid.id0.wr_gen)128#define WRID_IDX(wrid) (wrid.id0.wr_idx)129#define WRID_LO(wrid) (wrid.id0.wr_lo)130131struct fw_riwrh {132__be32 op_seop_flags;133__be32 gen_tid_len;134};135136#define S_FW_RIWR_OP 24137#define M_FW_RIWR_OP 0xff138#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)139#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)140141#define S_FW_RIWR_SOPEOP 22142#define M_FW_RIWR_SOPEOP 0x3143#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)144145#define S_FW_RIWR_FLAGS 8146#define M_FW_RIWR_FLAGS 0x3fffff147#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)148#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)149150#define S_FW_RIWR_TID 8151#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)152153#define S_FW_RIWR_LEN 0154#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)155156#define S_FW_RIWR_GEN 31157#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)158159struct t3_sge {160__be32 stag;161__be32 len;162__be64 to;163};164165/* If num_sgle is zero, flit 5+ contains immediate data.*/166struct t3_send_wr 
{167struct fw_riwrh wrh; /* 0 */168union t3_wrid wrid; /* 1 */169170u8 rdmaop; /* 2 */171u8 reserved[3];172__be32 rem_stag;173__be32 plen; /* 3 */174__be32 num_sgle;175struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */176};177178#define T3_MAX_FASTREG_DEPTH 10179#define T3_MAX_FASTREG_FRAG 10180181struct t3_fastreg_wr {182struct fw_riwrh wrh; /* 0 */183union t3_wrid wrid; /* 1 */184__be32 stag; /* 2 */185__be32 len;186__be32 va_base_hi; /* 3 */187__be32 va_base_lo_fbo;188__be32 page_type_perms; /* 4 */189__be32 reserved1;190__be64 pbl_addrs[0]; /* 5+ */191};192193/*194* If a fastreg wr spans multiple wqes, then the 2nd fragment look like this.195*/196struct t3_pbl_frag {197struct fw_riwrh wrh; /* 0 */198__be64 pbl_addrs[14]; /* 1..14 */199};200201#define S_FR_PAGE_COUNT 24202#define M_FR_PAGE_COUNT 0xff203#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)204#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)205206#define S_FR_PAGE_SIZE 16207#define M_FR_PAGE_SIZE 0x1f208#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)209#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)210211#define S_FR_TYPE 8212#define M_FR_TYPE 0x1213#define V_FR_TYPE(x) ((x) << S_FR_TYPE)214#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)215216#define S_FR_PERMS 0217#define M_FR_PERMS 0xff218#define V_FR_PERMS(x) ((x) << S_FR_PERMS)219#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)220221struct t3_local_inv_wr {222struct fw_riwrh wrh; /* 0 */223union t3_wrid wrid; /* 1 */224__be32 stag; /* 2 */225__be32 reserved;226};227228struct t3_rdma_write_wr {229struct fw_riwrh wrh; /* 0 */230union t3_wrid wrid; /* 1 */231u8 rdmaop; /* 2 */232u8 reserved[3];233__be32 stag_sink;234__be64 to_sink; /* 3 */235__be32 plen; /* 4 */236__be32 num_sgle;237struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */238};239240struct t3_rdma_read_wr {241struct fw_riwrh wrh; /* 0 */242union t3_wrid wrid; /* 1 */243u8 rdmaop; /* 2 */244u8 local_inv;245u8 reserved[2];246__be32 
rem_stag;247__be64 rem_to; /* 3 */248__be32 local_stag; /* 4 */249__be32 local_len;250__be64 local_to; /* 5 */251};252253struct t3_bind_mw_wr {254struct fw_riwrh wrh; /* 0 */255union t3_wrid wrid; /* 1 */256u16 reserved; /* 2 */257u8 type;258u8 perms;259__be32 mr_stag;260__be32 mw_stag; /* 3 */261__be32 mw_len;262__be64 mw_va; /* 4 */263__be32 mr_pbl_addr; /* 5 */264u8 reserved2[3];265u8 mr_pagesz;266};267268struct t3_receive_wr {269struct fw_riwrh wrh; /* 0 */270union t3_wrid wrid; /* 1 */271u8 pagesz[T3_MAX_SGE];272__be32 num_sgle; /* 2 */273struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */274__be32 pbl_addr[T3_MAX_SGE];275};276277struct t3_bypass_wr {278struct fw_riwrh wrh;279union t3_wrid wrid; /* 1 */280};281282struct t3_modify_qp_wr {283struct fw_riwrh wrh; /* 0 */284union t3_wrid wrid; /* 1 */285__be32 flags; /* 2 */286__be32 quiesce; /* 2 */287__be32 max_ird; /* 3 */288__be32 max_ord; /* 3 */289__be64 sge_cmd; /* 4 */290__be64 ctx1; /* 5 */291__be64 ctx0; /* 6 */292};293294enum t3_modify_qp_flags {295MODQP_QUIESCE = 0x01,296MODQP_MAX_IRD = 0x02,297MODQP_MAX_ORD = 0x04,298MODQP_WRITE_EC = 0x08,299MODQP_READ_EC = 0x10,300};301302303enum t3_mpa_attrs {304uP_RI_MPA_RX_MARKER_ENABLE = 0x1,305uP_RI_MPA_TX_MARKER_ENABLE = 0x2,306uP_RI_MPA_CRC_ENABLE = 0x4,307uP_RI_MPA_IETF_ENABLE = 0x8308} __attribute__ ((packed));309310enum t3_qp_caps {311uP_RI_QP_RDMA_READ_ENABLE = 0x01,312uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,313uP_RI_QP_BIND_ENABLE = 0x04,314uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,315uP_RI_QP_STAG0_ENABLE = 0x10316} __attribute__ ((packed));317318enum rdma_init_rtr_types {319RTR_READ = 1,320RTR_WRITE = 2,321RTR_SEND = 3,322};323324#define S_RTR_TYPE 2325#define M_RTR_TYPE 0x3326#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)327#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)328329#define S_CHAN 4330#define M_CHAN 0x3331#define V_CHAN(x) ((x) << S_CHAN)332#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)333334struct t3_rdma_init_attr {335u32 tid;336u32 qpid;337u32 pdid;338u32 
scqid;339u32 rcqid;340u32 rq_addr;341u32 rq_size;342enum t3_mpa_attrs mpaattrs;343enum t3_qp_caps qpcaps;344u16 tcp_emss;345u32 ord;346u32 ird;347u64 qp_dma_addr;348u32 qp_dma_size;349enum rdma_init_rtr_types rtr_type;350u16 flags;351u16 rqe_count;352u32 irs;353u32 chan;354};355356struct t3_rdma_init_wr {357struct fw_riwrh wrh; /* 0 */358union t3_wrid wrid; /* 1 */359__be32 qpid; /* 2 */360__be32 pdid;361__be32 scqid; /* 3 */362__be32 rcqid;363__be32 rq_addr; /* 4 */364__be32 rq_size;365u8 mpaattrs; /* 5 */366u8 qpcaps;367__be16 ulpdu_size;368__be16 flags_rtr_type;369__be16 rqe_count;370__be32 ord; /* 6 */371__be32 ird;372__be64 qp_dma_addr; /* 7 */373__be32 qp_dma_size; /* 8 */374__be32 irs;375};376377struct t3_genbit {378u64 flit[15];379__be64 genbit;380};381382struct t3_wq_in_err {383u64 flit[13];384u64 err;385};386387enum rdma_init_wr_flags {388MPA_INITIATOR = (1<<0),389PRIV_QP = (1<<1),390};391392union t3_wr {393struct t3_send_wr send;394struct t3_rdma_write_wr write;395struct t3_rdma_read_wr read;396struct t3_receive_wr recv;397struct t3_fastreg_wr fastreg;398struct t3_pbl_frag pbl_frag;399struct t3_local_inv_wr local_inv;400struct t3_bind_mw_wr bind;401struct t3_bypass_wr bypass;402struct t3_rdma_init_wr init;403struct t3_modify_qp_wr qp_mod;404struct t3_genbit genbit;405struct t3_wq_in_err wq_in_err;406__be64 flit[16];407};408409#define T3_SQ_CQE_FLIT 13410#define T3_SQ_COOKIE_FLIT 14411412#define T3_RQ_COOKIE_FLIT 13413#define T3_RQ_CQE_FLIT 14414415static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)416{417return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));418}419420enum t3_wr_hdr_bits {421T3_EOP = 1,422T3_SOP = 2,423T3_SOPEOP = T3_EOP|T3_SOP,424};425426static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,427enum t3_wr_flags flags, u8 genbit, u32 tid,428u8 len, u8 sopeop)429{430wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |431V_FW_RIWR_SOPEOP(sopeop) |432V_FW_RIWR_FLAGS(flags));433wmb();434wqe->gen_tid_len = 
cpu_to_be32(V_FW_RIWR_GEN(genbit) |435V_FW_RIWR_TID(tid) |436V_FW_RIWR_LEN(len));437/* 2nd gen bit... */438((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);439}440441/*442* T3 ULP2_TX commands443*/444enum t3_utx_mem_op {445T3_UTX_MEM_READ = 2,446T3_UTX_MEM_WRITE = 3447};448449/* T3 MC7 RDMA TPT entry format */450451enum tpt_mem_type {452TPT_NON_SHARED_MR = 0x0,453TPT_SHARED_MR = 0x1,454TPT_MW = 0x2,455TPT_MW_RELAXED_PROTECTION = 0x3456};457458enum tpt_addr_type {459TPT_ZBTO = 0,460TPT_VATO = 1461};462463enum tpt_mem_perm {464TPT_MW_BIND = 0x10,465TPT_LOCAL_READ = 0x8,466TPT_LOCAL_WRITE = 0x4,467TPT_REMOTE_READ = 0x2,468TPT_REMOTE_WRITE = 0x1469};470471struct tpt_entry {472__be32 valid_stag_pdid;473__be32 flags_pagesize_qpid;474475__be32 rsvd_pbl_addr;476__be32 len;477__be32 va_hi;478__be32 va_low_or_fbo;479480__be32 rsvd_bind_cnt_or_pstag;481__be32 rsvd_pbl_size;482};483484#define S_TPT_VALID 31485#define V_TPT_VALID(x) ((x) << S_TPT_VALID)486#define F_TPT_VALID V_TPT_VALID(1U)487488#define S_TPT_STAG_KEY 23489#define M_TPT_STAG_KEY 0xFF490#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)491#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)492493#define S_TPT_STAG_STATE 22494#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)495#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)496497#define S_TPT_STAG_TYPE 20498#define M_TPT_STAG_TYPE 0x3499#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)500#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)501502#define S_TPT_PDID 0503#define M_TPT_PDID 0xFFFFF504#define V_TPT_PDID(x) ((x) << S_TPT_PDID)505#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)506507#define S_TPT_PERM 28508#define M_TPT_PERM 0xF509#define V_TPT_PERM(x) ((x) << S_TPT_PERM)510#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)511512#define S_TPT_REM_INV_DIS 27513#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)514#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)515516#define S_TPT_ADDR_TYPE 
26517#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)518#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)519520#define S_TPT_MW_BIND_ENABLE 25521#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)522#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)523524#define S_TPT_PAGE_SIZE 20525#define M_TPT_PAGE_SIZE 0x1F526#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)527#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)528529#define S_TPT_PBL_ADDR 0530#define M_TPT_PBL_ADDR 0x1FFFFFFF531#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)532#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)533534#define S_TPT_QPID 0535#define M_TPT_QPID 0xFFFFF536#define V_TPT_QPID(x) ((x) << S_TPT_QPID)537#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)538539#define S_TPT_PSTAG 0540#define M_TPT_PSTAG 0xFFFFFF541#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)542#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)543544#define S_TPT_PBL_SIZE 0545#define M_TPT_PBL_SIZE 0xFFFFF546#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)547#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)548549/*550* CQE defs551*/552struct t3_cqe {553__be32 header;554__be32 len;555union {556struct {557__be32 stag;558__be32 msn;559} rcqe;560struct {561u32 wrid_hi;562u32 wrid_low;563} scqe;564} u;565};566567#define S_CQE_OOO 31568#define M_CQE_OOO 0x1569#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)570#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)571572#define S_CQE_QPID 12573#define M_CQE_QPID 0x7FFFF574#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)575#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)576577#define S_CQE_SWCQE 11578#define M_CQE_SWCQE 0x1579#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)580#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)581582#define S_CQE_GENBIT 10583#define M_CQE_GENBIT 0x1584#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)585#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)586587#define 
S_CQE_STATUS 5588#define M_CQE_STATUS 0x1F589#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)590#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)591592#define S_CQE_TYPE 4593#define M_CQE_TYPE 0x1594#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)595#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)596597#define S_CQE_OPCODE 0598#define M_CQE_OPCODE 0xF599#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)600#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)601602#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))603#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))604#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))605#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))606#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))607#define SQ_TYPE(x) (CQE_TYPE((x)))608#define RQ_TYPE(x) (!CQE_TYPE((x)))609#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))610#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))611612#define CQE_SEND_OPCODE(x)( \613(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \614(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \615(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \616(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))617618#define CQE_LEN(x) (be32_to_cpu((x).len))619620/* used for RQ completion processing */621#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))622#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))623624/* used for SQ completion processing */625#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)626#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)627628/* generic accessor macros */629#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)630#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)631632#define TPT_ERR_SUCCESS 0x0633#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */634/* STAG is offlimt, being 0, */635/* or STAG_key mismatch */636#define TPT_ERR_PDID 0x2 /* PDID mismatch */637#define TPT_ERR_QPID 0x3 /* QPID 
mismatch */638#define TPT_ERR_ACCESS 0x4 /* Invalid access right */639#define TPT_ERR_WRAP 0x5 /* Wrap error */640#define TPT_ERR_BOUND 0x6 /* base and bounds voilation */641#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */642/* shared memory region */643#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */644/* shared memory region */645#define TPT_ERR_ECC 0x9 /* ECC error detected */646#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */647/* reading PSTAG for a MW */648/* Invalidate */649#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */650/* software error */651#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */652#define TPT_ERR_CRC 0x10 /* CRC error */653#define TPT_ERR_MARKER 0x11 /* Marker error */654#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */655#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */656#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */657#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */658#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */659#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */660#define TPT_ERR_MSN 0x18 /* MSN error */661#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */662#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */663/* or READ_REQ */664#define TPT_ERR_MSN_GAP 0x1B665#define TPT_ERR_MSN_RANGE 0x1C666#define TPT_ERR_IRD_OVERFLOW 0x1D667#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */668/* software error */669#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */670/* mismatch) */671672struct t3_swsq {673__u64 wr_id;674struct t3_cqe cqe;675__u32 sq_wptr;676__be32 read_len;677int opcode;678int complete;679int signaled;680};681682struct t3_swrq {683__u64 wr_id;684__u32 pbl_addr;685};686687/*688* A T3 WQ implements both the SQ and RQ.689*/690struct t3_wq {691union t3_wr *queue; /* DMA accessible memory */692dma_addr_t dma_addr; /* DMA address for HW */693DEFINE_DMA_UNMAP_ADDR(mapping); /* 
unmap kruft */694u32 error; /* 1 once we go to ERROR */695u32 qpid;696u32 wptr; /* idx to next available WR slot */697u32 size_log2; /* total wq size */698struct t3_swsq *sq; /* SW SQ */699struct t3_swsq *oldest_read; /* tracks oldest pending read */700u32 sq_wptr; /* sq_wptr - sq_rptr == count of */701u32 sq_rptr; /* pending wrs */702u32 sq_size_log2; /* sq size */703struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */704u32 rq_wptr; /* rq_wptr - rq_rptr == count of */705u32 rq_rptr; /* pending wrs */706struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */707u32 rq_size_log2; /* rq size */708u32 rq_addr; /* rq adapter address */709void __iomem *doorbell; /* kernel db */710u64 udb; /* user db if any */711struct cxio_rdev *rdev;712};713714struct t3_cq {715u32 cqid;716u32 rptr;717u32 wptr;718u32 size_log2;719dma_addr_t dma_addr;720DEFINE_DMA_UNMAP_ADDR(mapping);721struct t3_cqe *queue;722struct t3_cqe *sw_queue;723u32 sw_rptr;724u32 sw_wptr;725};726727#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \728CQE_GENBIT(*cqe))729730struct t3_cq_status_page {731u32 cq_err;732};733734static inline int cxio_cq_in_error(struct t3_cq *cq)735{736return ((struct t3_cq_status_page *)737&cq->queue[1 << cq->size_log2])->cq_err;738}739740static inline void cxio_set_cq_in_error(struct t3_cq *cq)741{742((struct t3_cq_status_page *)743&cq->queue[1 << cq->size_log2])->cq_err = 1;744}745746static inline void cxio_set_wq_in_error(struct t3_wq *wq)747{748wq->queue->wq_in_err.err |= 1;749}750751static inline void cxio_disable_wq_db(struct t3_wq *wq)752{753wq->queue->wq_in_err.err |= 2;754}755756static inline void cxio_enable_wq_db(struct t3_wq *wq)757{758wq->queue->wq_in_err.err &= ~2;759}760761static inline int cxio_wq_db_enabled(struct t3_wq *wq)762{763return !(wq->queue->wq_in_err.err & 2);764}765766static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)767{768struct t3_cqe *cqe;769770cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));771if 
(CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))772return cqe;773return NULL;774}775776static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)777{778struct t3_cqe *cqe;779780if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {781cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));782return cqe;783}784return NULL;785}786787static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)788{789struct t3_cqe *cqe;790791if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {792cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));793return cqe;794}795cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));796if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))797return cqe;798return NULL;799}800801#endif802803804