Path: drivers/infiniband/hw/qib/qib_verbs.h
/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC	16
#define QIB_GUIDS_PER_PORT	5

#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
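/*
 * Worked example for the macro above (illustrative note, not part of
 * the original header): assuming 4 KiB pages, QPNMAP_ENTRIES is
 * 2^24 / 4096 / 8 = 512.  Each bitmap page holds 4096 * 8 = 32768 bits,
 * so 512 map pages cover the full 2^24 QPN space.
 */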
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION	2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK		(1 << 31)
#define IB_BTH_SOLICITED	(1 << 23)
#define IB_BTH_MIG_REQ		(1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION		6
#define IB_GRH_VERSION_MASK	0xF
#define IB_GRH_VERSION_SHIFT	28
#define IB_GRH_TCLASS_MASK	0xFF
#define IB_GRH_TCLASS_SHIFT	20
#define IB_GRH_FLOW_MASK	0xFFFFF
#define IB_GRH_FLOW_SHIFT	0
#define IB_GRH_NEXT_HDR		0x1B

#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)
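/*
 * Example (illustrative sketch, not part of the original header): how
 * the IB_GRH_* masks and shifts above pick a field out of the first
 * GRH word.  The helper name is hypothetical.
 */
static inline u32 qib_example_grh_tclass(__be32 version_tclass_flow)
{
	u32 w = be32_to_cpu(version_tclass_flow);

	return (w >> IB_GRH_TCLASS_SHIFT) & IB_GRH_TCLASS_MASK;
}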
/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __attribute__ ((packed));

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
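/*
 * Example (illustrative sketch, not part of the original header): both
 * head and tail of the mmap'ed ring wrap at ibcq.cqe + 1, since the
 * queue arrays really hold cqe + 1 entries.  The helper name is
 * hypothetical; "cqe" is the caller's copy of ibcq.cqe.
 */
static inline u32 qib_example_cq_advance(u32 index, u32 cqe)
{
	return (index == cqe) ? 0 : index + 1;
}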
/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;
	spinlock_t lock;        /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ	(PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of qib_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	u8  page_shift;         /* 0 - non-uniform/non power-of-2 sizes */
	atomic_t refcount;
	struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};
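/*
 * Example (illustrative sketch, not part of the original header):
 * stepping a struct qib_sge to the next qib_seg once the current
 * segment is exhausted, using the m/n indices described above.  The
 * helper name is hypothetical, and the mr->mapsz bounds check a real
 * copy loop would need is omitted for brevity.
 */
static inline void qib_example_next_seg(struct qib_sge *sge)
{
	if (++sge->n >= QIB_SEGSZ) {
		/* ran off the end of map[m]; move to the next map page */
		sge->m++;
		sge->n = 0;
	}
	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
}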
/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	struct ib_send_wr wr;   /* don't use wr.sg_list */
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	spinlock_t lock;        /* protect changes in this struct */
	u32 size;               /* size of RWQE array */
	u8 max_sge;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list;        /* next SGE to be used if any */
	struct qib_sge sge;     /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP 'state'.
 */
struct qib_qp {
	struct ib_qp ibqp;
	struct qib_qp *next;            /* linked list for QPN hash table */
	struct qib_qp *timer_next;      /* linked list for qib_ib_timer() */
	struct list_head iowait;        /* link for wait PIO buf */
	struct list_head rspwait;       /* link for waiting to respond */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_ib_header s_hdr;     /* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct timer_list s_timer;
	struct work_struct s_work;
	struct qib_mmap_info *ip;
	struct qib_sge_state *s_cur_sge;
	struct qib_verbs_txreq *s_tx;
	struct qib_mregion *s_rdma_mr;
	struct qib_sge_state s_sge;     /* current send request data */
	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
	struct qib_sge_state s_ack_rdma_sge;
	struct qib_sge_state s_rdma_read_sge;
	struct qib_sge_state r_sge;     /* current receive data */
	spinlock_t r_lock;              /* used for APM */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u32 s_flags;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u64 r_wr_id;            /* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_psn;              /* expected rcv packet sequence number */
	u32 r_msn;              /* message sequence number */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 state;               /* QP state */
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_state;             /* opcode of last packet received */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_srate;
	u8 s_draining;
	u8 s_mig_state;
	u8 timeout;             /* Timeout for this QP */
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 port_num;
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_head;             /* new entries added here */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_ssn;              /* SSN of tail entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	struct qib_swqe *s_wq;  /* send work queue */
	struct qib_swqe *s_wqe;
	struct qib_rq r_rq;             /* receive work queue */
	struct qib_sge r_sg_list[0];    /* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1
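/*
 * Example (illustrative sketch, not part of the original header): the
 * r_aflags bits above are meant for the atomic bitops, e.g. atomically
 * claiming the current receive WQE.  The helper name is hypothetical.
 */
static inline int qib_example_claim_rwqe(struct qib_qp *qp)
{
	return test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags);
}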
/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR	0x0001
#define QIB_S_BUSY		0x0002
#define QIB_S_TIMER		0x0004
#define QIB_S_RESP_PENDING	0x0008
#define QIB_S_ACK_PENDING	0x0010
#define QIB_S_WAIT_FENCE	0x0020
#define QIB_S_WAIT_RDMAR	0x0040
#define QIB_S_WAIT_RNR		0x0080
#define QIB_S_WAIT_SSN_CREDIT	0x0100
#define QIB_S_WAIT_DMA		0x0200
#define QIB_S_WAIT_PIO		0x0400
#define QIB_S_WAIT_TX		0x0800
#define QIB_S_WAIT_DMA_DESC	0x1000
#define QIB_S_WAIT_KMEM		0x2000
#define QIB_S_WAIT_PSN		0x4000
#define QIB_S_WAIT_ACK		0x8000
#define QIB_S_SEND_ONE		0x10000
#define QIB_S_UNLIMITED_CREDIT	0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					    unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				   (sizeof(struct qib_swqe) +
				    qp->s_max_sge *
				    sizeof(struct qib_sge)) * n);
}

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
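/*
 * Example (illustrative sketch, not part of the original header):
 * looking up the next receive WQE to consume with get_rwqe_ptr().
 * The helper name is hypothetical; the caller is assumed to hold
 * rq->lock and to have checked that the queue is non-empty and that
 * wq->tail is within rq->size.
 */
static inline struct qib_rwqe *qib_example_next_rwqe(struct qib_rq *rq)
{
	return get_rwqe_ptr(rq, rq->wq->tail);
}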
/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock;        /* protect changes in this struct */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
	spinlock_t lock;        /* protect changes in this struct */
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
	u32 max;                /* size of the table */
	struct qib_mregion **table;
};

struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_ibport {
	struct qib_qp *qp0;
	struct qib_qp *qp1;
	struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;        /* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
	u64 tid;                /* TID for traps */
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];

	struct qib_opcode_stats opstats[128];
};
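/*
 * Example (illustrative sketch, not part of the original header): the
 * z_* fields above record the counter values captured when the PMA
 * counters were last cleared, so a PortCounters reply reports the
 * current hardware count minus the starting count.  The helper name is
 * hypothetical and "hw_symbol_errors" is an assumed hardware reading.
 */
static inline u64 qib_example_pma_delta(u64 hw_symbol_errors,
					const struct qib_ibport *ibp)
{
	return hw_symbol_errors - ibp->z_symbol_error_counter;
}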
struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;    /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;       /* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
	unsigned qp_table_size; /* size of the hash table */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated;    /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;    /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */
static inline void qib_schedule_send(struct qib_qp *qp)
{
	if (qib_send_ok(qp))
		queue_work(ib_wq, &qp->s_work);
}

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
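/*
 * Worked example for qib_cmp24() (illustrative note, not part of the
 * original header): with a = 0x000001 and b = 0xFFFFFF, the raw
 * difference is -16777214 (0xFF000002 as a 32-bit value); shifting
 * left by 8 discards the bits above the 24-bit PSN space and leaves
 * 0x00000200, a positive result, so a correctly compares as "after" b
 * across the PSN wrap.
 */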
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);

int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * The HCA-independent IB PhysPortState values below are returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */