/* drivers/infiniband/hw/ipath/ipath_verbs.h */
/*1* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.2* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.3*4* This software is available to you under a choice of one of two5* licenses. You may choose to be licensed under the terms of the GNU6* General Public License (GPL) Version 2, available from the file7* COPYING in the main directory of this source tree, or the8* OpenIB.org BSD license below:9*10* Redistribution and use in source and binary forms, with or11* without modification, are permitted provided that the following12* conditions are met:13*14* - Redistributions of source code must retain the above15* copyright notice, this list of conditions and the following16* disclaimer.17*18* - Redistributions in binary form must reproduce the above19* copyright notice, this list of conditions and the following20* disclaimer in the documentation and/or other materials21* provided with the distribution.22*23* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,24* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF25* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND26* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS27* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN28* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN29* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE30* SOFTWARE.31*/3233#ifndef IPATH_VERBS_H34#define IPATH_VERBS_H3536#include <linux/types.h>37#include <linux/spinlock.h>38#include <linux/kernel.h>39#include <linux/interrupt.h>40#include <linux/kref.h>41#include <rdma/ib_pack.h>42#include <rdma/ib_user_verbs.h>4344#include "ipath_kernel.h"4546#define IPATH_MAX_RDMA_ATOMIC 44748#define QPN_MAX (1 << 24)49#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)5051/*52* Increment this value if any changes that break userspace ABI53* compatibility are made.54*/55#define IPATH_UVERBS_ABI_VERSION 25657/*58* Define an ib_cq_notify value that is not valid so we know when CQ59* notifications are armed.60*/61#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)6263/* AETH NAK opcode values */64#define IB_RNR_NAK 0x2065#define IB_NAK_PSN_ERROR 0x6066#define IB_NAK_INVALID_REQUEST 0x6167#define IB_NAK_REMOTE_ACCESS_ERROR 0x6268#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x6369#define IB_NAK_INVALID_RD_REQUEST 0x647071/* Flags for checking QP state (see ib_ipath_state_ops[]) */72#define IPATH_POST_SEND_OK 0x0173#define IPATH_POST_RECV_OK 0x0274#define IPATH_PROCESS_RECV_OK 0x0475#define IPATH_PROCESS_SEND_OK 0x0876#define IPATH_PROCESS_NEXT_SEND_OK 0x1077#define IPATH_FLUSH_SEND 0x2078#define IPATH_FLUSH_RECV 0x4079#define IPATH_PROCESS_OR_FLUSH_SEND \80(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)8182/* IB Performance Manager status values */83#define IB_PMA_SAMPLE_STATUS_DONE 0x0084#define IB_PMA_SAMPLE_STATUS_STARTED 0x0185#define IB_PMA_SAMPLE_STATUS_RUNNING 0x028687/* Mandatory IB performance counter select values. 
*/88#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)89#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)90#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)91#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)92#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)9394struct ib_reth {95__be64 vaddr;96__be32 rkey;97__be32 length;98} __attribute__ ((packed));99100struct ib_atomic_eth {101__be32 vaddr[2]; /* unaligned so access as 2 32-bit words */102__be32 rkey;103__be64 swap_data;104__be64 compare_data;105} __attribute__ ((packed));106107struct ipath_other_headers {108__be32 bth[3];109union {110struct {111__be32 deth[2];112__be32 imm_data;113} ud;114struct {115struct ib_reth reth;116__be32 imm_data;117} rc;118struct {119__be32 aeth;120__be32 atomic_ack_eth[2];121} at;122__be32 imm_data;123__be32 aeth;124struct ib_atomic_eth atomic_eth;125} u;126} __attribute__ ((packed));127128/*129* Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes130* long (72 w/ imm_data). Only the first 56 bytes of the IB header131* will be in the eager header buffer. 
The remaining 12 or 16 bytes132* are in the data buffer.133*/134struct ipath_ib_header {135__be16 lrh[4];136union {137struct {138struct ib_grh grh;139struct ipath_other_headers oth;140} l;141struct ipath_other_headers oth;142} u;143} __attribute__ ((packed));144145struct ipath_pio_header {146__le32 pbc[2];147struct ipath_ib_header hdr;148} __attribute__ ((packed));149150/*151* There is one struct ipath_mcast for each multicast GID.152* All attached QPs are then stored as a list of153* struct ipath_mcast_qp.154*/155struct ipath_mcast_qp {156struct list_head list;157struct ipath_qp *qp;158};159160struct ipath_mcast {161struct rb_node rb_node;162union ib_gid mgid;163struct list_head qp_list;164wait_queue_head_t wait;165atomic_t refcount;166int n_attached;167};168169/* Protection domain */170struct ipath_pd {171struct ib_pd ibpd;172int user; /* non-zero if created from user space */173};174175/* Address Handle */176struct ipath_ah {177struct ib_ah ibah;178struct ib_ah_attr attr;179};180181/*182* This structure is used by ipath_mmap() to validate an offset183* when an mmap() request is made. 
The vm_area_struct then uses184* this as its vm_private_data.185*/186struct ipath_mmap_info {187struct list_head pending_mmaps;188struct ib_ucontext *context;189void *obj;190__u64 offset;191struct kref ref;192unsigned size;193};194195/*196* This structure is used to contain the head pointer, tail pointer,197* and completion queue entries as a single memory allocation so198* it can be mmap'ed into user space.199*/200struct ipath_cq_wc {201u32 head; /* index of next entry to fill */202u32 tail; /* index of next ib_poll_cq() entry */203union {204/* these are actually size ibcq.cqe + 1 */205struct ib_uverbs_wc uqueue[0];206struct ib_wc kqueue[0];207};208};209210/*211* The completion queue structure.212*/213struct ipath_cq {214struct ib_cq ibcq;215struct tasklet_struct comptask;216spinlock_t lock;217u8 notify;218u8 triggered;219struct ipath_cq_wc *queue;220struct ipath_mmap_info *ip;221};222223/*224* A segment is a linear region of low physical memory.225* XXX Maybe we should use phys addr here and kmap()/kunmap().226* Used by the verbs layer.227*/228struct ipath_seg {229void *vaddr;230size_t length;231};232233/* The number of ipath_segs that fit in a page. 
*/234#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))235236struct ipath_segarray {237struct ipath_seg segs[IPATH_SEGSZ];238};239240struct ipath_mregion {241struct ib_pd *pd; /* shares refcnt of ibmr.pd */242u64 user_base; /* User's address for this region */243u64 iova; /* IB start address of this region */244size_t length;245u32 lkey;246u32 offset; /* offset (bytes) to start of region */247int access_flags;248u32 max_segs; /* number of ipath_segs in all the arrays */249u32 mapsz; /* size of the map array */250struct ipath_segarray *map[0]; /* the segments */251};252253/*254* These keep track of the copy progress within a memory region.255* Used by the verbs layer.256*/257struct ipath_sge {258struct ipath_mregion *mr;259void *vaddr; /* kernel virtual address of segment */260u32 sge_length; /* length of the SGE */261u32 length; /* remaining length of the segment */262u16 m; /* current index: mr->map[m] */263u16 n; /* current index: mr->map[m]->segs[n] */264};265266/* Memory region */267struct ipath_mr {268struct ib_mr ibmr;269struct ib_umem *umem;270struct ipath_mregion mr; /* must be last */271};272273/*274* Send work request queue entry.275* The size of the sg_list is determined when the QP is created and stored276* in qp->s_max_sge.277*/278struct ipath_swqe {279struct ib_send_wr wr; /* don't use wr.sg_list */280u32 psn; /* first packet sequence number */281u32 lpsn; /* last packet sequence number */282u32 ssn; /* send sequence number */283u32 length; /* total length of data in sg_list */284struct ipath_sge sg_list[0];285};286287/*288* Receive work request queue entry.289* The size of the sg_list is determined when the QP (or SRQ) is created290* and stored in qp->r_rq.max_sge (or srq->rq.max_sge).291*/292struct ipath_rwqe {293u64 wr_id;294u8 num_sge;295struct ib_sge sg_list[0];296};297298/*299* This structure is used to contain the head pointer, tail pointer,300* and receive work queue entries as a single memory allocation so301* it can be mmap'ed into 
user space.302* Note that the wq array elements are variable size so you can't303* just index into the array to get the N'th element;304* use get_rwqe_ptr() instead.305*/306struct ipath_rwq {307u32 head; /* new work requests posted to the head */308u32 tail; /* receives pull requests from here. */309struct ipath_rwqe wq[0];310};311312struct ipath_rq {313struct ipath_rwq *wq;314spinlock_t lock;315u32 size; /* size of RWQE array */316u8 max_sge;317};318319struct ipath_srq {320struct ib_srq ibsrq;321struct ipath_rq rq;322struct ipath_mmap_info *ip;323/* send signal when number of RWQEs < limit */324u32 limit;325};326327struct ipath_sge_state {328struct ipath_sge *sg_list; /* next SGE to be used if any */329struct ipath_sge sge; /* progress state for the current SGE */330u8 num_sge;331u8 static_rate;332};333334/*335* This structure holds the information that the send tasklet needs336* to send a RDMA read response or atomic operation.337*/338struct ipath_ack_entry {339u8 opcode;340u8 sent;341u32 psn;342union {343struct ipath_sge_state rdma_sge;344u64 atomic_data;345};346};347348/*349* Variables prefixed with s_ are for the requester (sender).350* Variables prefixed with r_ are for the responder (receiver).351* Variables prefixed with ack_ are for responder replies.352*353* Common variables are protected by both r_rq.lock and s_lock in that order354* which only happens in modify_qp() or changing the QP 'state'.355*/356struct ipath_qp {357struct ib_qp ibqp;358struct ipath_qp *next; /* link list for QPN hash table */359struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */360struct ipath_qp *pio_next; /* link for ipath_ib_piobufavail() */361struct list_head piowait; /* link for wait PIO buf */362struct list_head timerwait; /* link for waiting for timeouts */363struct ib_ah_attr remote_ah_attr;364struct ipath_ib_header s_hdr; /* next packet header to send */365atomic_t refcount;366wait_queue_head_t wait;367wait_queue_head_t wait_dma;368struct tasklet_struct 
s_task;369struct ipath_mmap_info *ip;370struct ipath_sge_state *s_cur_sge;371struct ipath_verbs_txreq *s_tx;372struct ipath_sge_state s_sge; /* current send request data */373struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];374struct ipath_sge_state s_ack_rdma_sge;375struct ipath_sge_state s_rdma_read_sge;376struct ipath_sge_state r_sge; /* current receive data */377spinlock_t s_lock;378atomic_t s_dma_busy;379u16 s_pkt_delay;380u16 s_hdrwords; /* size of s_hdr in 32 bit words */381u32 s_cur_size; /* size of send packet in bytes */382u32 s_len; /* total length of s_sge */383u32 s_rdma_read_len; /* total length of s_rdma_read_sge */384u32 s_next_psn; /* PSN for next request */385u32 s_last_psn; /* last response PSN processed */386u32 s_psn; /* current packet sequence number */387u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */388u32 s_ack_psn; /* PSN for acking sends and RDMA writes */389u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */390u32 r_ack_psn; /* PSN for next ACK or atomic ACK */391u64 r_wr_id; /* ID for current receive WQE */392unsigned long r_aflags;393u32 r_len; /* total length of r_sge */394u32 r_rcv_len; /* receive data len processed */395u32 r_psn; /* expected rcv packet sequence number */396u32 r_msn; /* message sequence number */397u8 state; /* QP state */398u8 s_state; /* opcode of last packet sent */399u8 s_ack_state; /* opcode of packet to ACK */400u8 s_nak_state; /* non-zero if NAK is pending */401u8 r_state; /* opcode of last packet received */402u8 r_nak_state; /* non-zero if NAK is pending */403u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */404u8 r_flags;405u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */406u8 r_head_ack_queue; /* index into s_ack_queue[] */407u8 qp_access_flags;408u8 s_max_sge; /* size of s_wq->sg_list */409u8 s_retry_cnt; /* number of times to retry */410u8 s_rnr_retry_cnt;411u8 s_retry; /* requester retry counter */412u8 s_rnr_retry; /* requester RNR 
retry counter */413u8 s_pkey_index; /* PKEY index to use */414u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */415u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */416u8 s_tail_ack_queue; /* index into s_ack_queue[] */417u8 s_flags;418u8 s_dmult;419u8 s_draining;420u8 timeout; /* Timeout for this QP */421enum ib_mtu path_mtu;422u32 remote_qpn;423u32 qkey; /* QKEY for this QP (for UD or RD) */424u32 s_size; /* send work queue size */425u32 s_head; /* new entries added here */426u32 s_tail; /* next entry to process */427u32 s_cur; /* current work queue entry */428u32 s_last; /* last un-ACK'ed entry */429u32 s_ssn; /* SSN of tail entry */430u32 s_lsn; /* limit sequence number (credit) */431struct ipath_swqe *s_wq; /* send work queue */432struct ipath_swqe *s_wqe;433struct ipath_sge *r_ud_sg_list;434struct ipath_rq r_rq; /* receive work queue */435struct ipath_sge r_sg_list[0]; /* verified SGEs */436};437438/*439* Atomic bit definitions for r_aflags.440*/441#define IPATH_R_WRID_VALID 0442443/*444* Bit definitions for r_flags.445*/446#define IPATH_R_REUSE_SGE 0x01447#define IPATH_R_RDMAR_SEQ 0x02448449/*450* Bit definitions for s_flags.451*452* IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs453* before processing the next SWQE454* IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs455* before processing the next SWQE456* IPATH_S_WAITING - waiting for RNR timeout or send buffer available.457* IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE458* IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating459* next send completion entry not via send DMA.460*/461#define IPATH_S_SIGNAL_REQ_WR 0x01462#define IPATH_S_FENCE_PENDING 0x02463#define IPATH_S_RDMAR_PENDING 0x04464#define IPATH_S_ACK_PENDING 0x08465#define IPATH_S_BUSY 0x10466#define IPATH_S_WAITING 0x20467#define IPATH_S_WAIT_SSN_CREDIT 0x40468#define IPATH_S_WAIT_DMA 0x80469470#define IPATH_S_ANY_WAIT 
(IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \471IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)472473#define IPATH_PSN_CREDIT 512474475/*476* Since struct ipath_swqe is not a fixed size, we can't simply index into477* struct ipath_qp.s_wq. This function does the array index computation.478*/479static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,480unsigned n)481{482return (struct ipath_swqe *)((char *)qp->s_wq +483(sizeof(struct ipath_swqe) +484qp->s_max_sge *485sizeof(struct ipath_sge)) * n);486}487488/*489* Since struct ipath_rwqe is not a fixed size, we can't simply index into490* struct ipath_rwq.wq. This function does the array index computation.491*/492static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,493unsigned n)494{495return (struct ipath_rwqe *)496((char *) rq->wq->wq +497(sizeof(struct ipath_rwqe) +498rq->max_sge * sizeof(struct ib_sge)) * n);499}500501/*502* QPN-map pages start out as NULL, they get allocated upon503* first use and are never deallocated. 
This way,504* large bitmaps are not allocated unless large numbers of QPs are used.505*/506struct qpn_map {507atomic_t n_free;508void *page;509};510511struct ipath_qp_table {512spinlock_t lock;513u32 last; /* last QP number allocated */514u32 max; /* size of the hash table */515u32 nmaps; /* size of the map table */516struct ipath_qp **table;517/* bit map of free numbers */518struct qpn_map map[QPNMAP_ENTRIES];519};520521struct ipath_lkey_table {522spinlock_t lock;523u32 next; /* next unused index (speeds search) */524u32 gen; /* generation count */525u32 max; /* size of the table */526struct ipath_mregion **table;527};528529struct ipath_opcode_stats {530u64 n_packets; /* number of packets */531u64 n_bytes; /* total number of bytes */532};533534struct ipath_ibdev {535struct ib_device ibdev;536struct ipath_devdata *dd;537struct list_head pending_mmaps;538spinlock_t mmap_offset_lock;539u32 mmap_offset;540int ib_unit; /* This is the device number */541u16 sm_lid; /* in host order */542u8 sm_sl;543u8 mkeyprot;544/* non-zero when timer is set */545unsigned long mkey_lease_timeout;546547/* The following fields are really per port. 
*/548struct ipath_qp_table qp_table;549struct ipath_lkey_table lk_table;550struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */551struct list_head piowait; /* list for wait PIO buf */552struct list_head txreq_free;553void *txreq_bufs;554/* list of QPs waiting for RNR timer */555struct list_head rnrwait;556spinlock_t pending_lock;557__be64 sys_image_guid; /* in network order */558__be64 gid_prefix; /* in network order */559__be64 mkey;560561u32 n_pds_allocated; /* number of PDs allocated for device */562spinlock_t n_pds_lock;563u32 n_ahs_allocated; /* number of AHs allocated for device */564spinlock_t n_ahs_lock;565u32 n_cqs_allocated; /* number of CQs allocated for device */566spinlock_t n_cqs_lock;567u32 n_qps_allocated; /* number of QPs allocated for device */568spinlock_t n_qps_lock;569u32 n_srqs_allocated; /* number of SRQs allocated for device */570spinlock_t n_srqs_lock;571u32 n_mcast_grps_allocated; /* number of mcast groups allocated */572spinlock_t n_mcast_grps_lock;573574u64 ipath_sword; /* total dwords sent (sample result) */575u64 ipath_rword; /* total dwords received (sample result) */576u64 ipath_spkts; /* total packets sent (sample result) */577u64 ipath_rpkts; /* total packets received (sample result) */578/* # of ticks no data sent (sample result) */579u64 ipath_xmit_wait;580u64 rcv_errors; /* # of packets with SW detected rcv errs */581u64 n_unicast_xmit; /* total unicast packets sent */582u64 n_unicast_rcv; /* total unicast packets received */583u64 n_multicast_xmit; /* total multicast packets sent */584u64 n_multicast_rcv; /* total multicast packets received */585u64 z_symbol_error_counter; /* starting count for PMA */586u64 z_link_error_recovery_counter; /* starting count for PMA */587u64 z_link_downed_counter; /* starting count for PMA */588u64 z_port_rcv_errors; /* starting count for PMA */589u64 z_port_rcv_remphys_errors; /* starting count for PMA */590u64 z_port_xmit_discards; /* starting count for PMA */591u64 z_port_xmit_data; 
/* starting count for PMA */592u64 z_port_rcv_data; /* starting count for PMA */593u64 z_port_xmit_packets; /* starting count for PMA */594u64 z_port_rcv_packets; /* starting count for PMA */595u32 z_pkey_violations; /* starting count for PMA */596u32 z_local_link_integrity_errors; /* starting count for PMA */597u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */598u32 z_vl15_dropped; /* starting count for PMA */599u32 n_rc_resends;600u32 n_rc_acks;601u32 n_rc_qacks;602u32 n_seq_naks;603u32 n_rdma_seq;604u32 n_rnr_naks;605u32 n_other_naks;606u32 n_timeouts;607u32 n_pkt_drops;608u32 n_vl15_dropped;609u32 n_wqe_errs;610u32 n_rdma_dup_busy;611u32 n_piowait;612u32 n_unaligned;613u32 port_cap_flags;614u32 pma_sample_start;615u32 pma_sample_interval;616__be16 pma_counter_select[5];617u16 pma_tag;618u16 qkey_violations;619u16 mkey_violations;620u16 mkey_lease_period;621u16 pending_index; /* which pending queue is active */622u8 pma_sample_status;623u8 subnet_timeout;624u8 vl_high_limit;625struct ipath_opcode_stats opstats[128];626};627628struct ipath_verbs_counters {629u64 symbol_error_counter;630u64 link_error_recovery_counter;631u64 link_downed_counter;632u64 port_rcv_errors;633u64 port_rcv_remphys_errors;634u64 port_xmit_discards;635u64 port_xmit_data;636u64 port_rcv_data;637u64 port_xmit_packets;638u64 port_rcv_packets;639u32 local_link_integrity_errors;640u32 excessive_buffer_overrun_errors;641u32 vl15_dropped;642};643644struct ipath_verbs_txreq {645struct ipath_qp *qp;646struct ipath_swqe *wqe;647u32 map_len;648u32 len;649struct ipath_sge_state *ss;650struct ipath_pio_header hdr;651struct ipath_sdma_txreq txreq;652};653654static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)655{656return container_of(ibmr, struct ipath_mr, ibmr);657}658659static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)660{661return container_of(ibpd, struct ipath_pd, ibpd);662}663664static inline struct ipath_ah *to_iah(struct ib_ah *ibah)665{666return 
container_of(ibah, struct ipath_ah, ibah);667}668669static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)670{671return container_of(ibcq, struct ipath_cq, ibcq);672}673674static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)675{676return container_of(ibsrq, struct ipath_srq, ibsrq);677}678679static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)680{681return container_of(ibqp, struct ipath_qp, ibqp);682}683684static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)685{686return container_of(ibdev, struct ipath_ibdev, ibdev);687}688689/*690* This must be called with s_lock held.691*/692static inline void ipath_schedule_send(struct ipath_qp *qp)693{694if (qp->s_flags & IPATH_S_ANY_WAIT)695qp->s_flags &= ~IPATH_S_ANY_WAIT;696if (!(qp->s_flags & IPATH_S_BUSY))697tasklet_hi_schedule(&qp->s_task);698}699700int ipath_process_mad(struct ib_device *ibdev,701int mad_flags,702u8 port_num,703struct ib_wc *in_wc,704struct ib_grh *in_grh,705struct ib_mad *in_mad, struct ib_mad *out_mad);706707/*708* Compare the lower 24 bits of the two values.709* Returns an integer <, ==, or > than zero.710*/711static inline int ipath_cmp24(u32 a, u32 b)712{713return (((int) a) - ((int) b)) << 8;714}715716struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);717718int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,719u64 *rwords, u64 *spkts, u64 *rpkts,720u64 *xmit_wait);721722int ipath_get_counters(struct ipath_devdata *dd,723struct ipath_verbs_counters *cntrs);724725int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);726727int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);728729int ipath_mcast_tree_empty(void);730731__be32 ipath_compute_aeth(struct ipath_qp *qp);732733struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);734735struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,736struct ib_qp_init_attr *init_attr,737struct ib_udata *udata);738739int ipath_destroy_qp(struct ib_qp 
*ibqp);740741int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);742743int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,744int attr_mask, struct ib_udata *udata);745746int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,747int attr_mask, struct ib_qp_init_attr *init_attr);748749unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);750751int ipath_init_qp_table(struct ipath_ibdev *idev, int size);752753void ipath_get_credit(struct ipath_qp *qp, u32 aeth);754755unsigned ipath_ib_rate_to_mult(enum ib_rate rate);756757int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,758u32 hdrwords, struct ipath_sge_state *ss, u32 len);759760void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);761762void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);763764void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,765int has_grh, void *data, u32 tlen, struct ipath_qp *qp);766767void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,768int has_grh, void *data, u32 tlen, struct ipath_qp *qp);769770void ipath_restart_rc(struct ipath_qp *qp, u32 psn);771772void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);773774int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);775776void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,777int has_grh, void *data, u32 tlen, struct ipath_qp *qp);778779int ipath_alloc_lkey(struct ipath_lkey_table *rkt,780struct ipath_mregion *mr);781782void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);783784int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,785struct ib_sge *sge, int acc);786787int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,788u32 len, u64 vaddr, u32 rkey, int acc);789790int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,791struct ib_recv_wr **bad_wr);792793struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,794struct ib_srq_init_attr 
*srq_init_attr,795struct ib_udata *udata);796797int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,798enum ib_srq_attr_mask attr_mask,799struct ib_udata *udata);800801int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);802803int ipath_destroy_srq(struct ib_srq *ibsrq);804805void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);806807int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);808809struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,810struct ib_ucontext *context,811struct ib_udata *udata);812813int ipath_destroy_cq(struct ib_cq *ibcq);814815int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);816817int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);818819struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);820821struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,822struct ib_phys_buf *buffer_list,823int num_phys_buf, int acc, u64 *iova_start);824825struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,826u64 virt_addr, int mr_access_flags,827struct ib_udata *udata);828829int ipath_dereg_mr(struct ib_mr *ibmr);830831struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,832struct ib_fmr_attr *fmr_attr);833834int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,835int list_len, u64 iova);836837int ipath_unmap_fmr(struct list_head *fmr_list);838839int ipath_dealloc_fmr(struct ib_fmr *ibfmr);840841void ipath_release_mmap_info(struct kref *ref);842843struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,844u32 size,845struct ib_ucontext *context,846void *obj);847848void ipath_update_mmap_info(struct ipath_ibdev *dev,849struct ipath_mmap_info *ip,850u32 size, void *obj);851852int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);853854void ipath_insert_rnr_queue(struct ipath_qp *qp);855856int ipath_init_sge(struct ipath_qp *qp, 
struct ipath_rwqe *wqe,857u32 *lengthp, struct ipath_sge_state *ss);858859int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);860861u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,862struct ib_global_route *grh, u32 hwords, u32 nwords);863864void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,865struct ipath_other_headers *ohdr,866u32 bth0, u32 bth2);867868void ipath_do_send(unsigned long data);869870void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,871enum ib_wc_status status);872873int ipath_make_rc_req(struct ipath_qp *qp);874875int ipath_make_uc_req(struct ipath_qp *qp);876877int ipath_make_ud_req(struct ipath_qp *qp);878879int ipath_register_ib_device(struct ipath_devdata *);880881void ipath_unregister_ib_device(struct ipath_ibdev *);882883void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);884885int ipath_ib_piobufavail(struct ipath_ibdev *);886887unsigned ipath_get_npkeys(struct ipath_devdata *);888889u32 ipath_get_cr_errpkey(struct ipath_devdata *);890891unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);892893extern const enum ib_wc_opcode ib_ipath_wc_opcode[];894895/*896* Below converts HCA-specific LinkTrainingState to IB PhysPortState897* values.898*/899extern const u8 ipath_cvt_physportstate[];900#define IB_PHYSPORTSTATE_SLEEP 1901#define IB_PHYSPORTSTATE_POLL 2902#define IB_PHYSPORTSTATE_DISABLED 3903#define IB_PHYSPORTSTATE_CFG_TRAIN 4904#define IB_PHYSPORTSTATE_LINKUP 5905#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6906907extern const int ib_ipath_state_ops[];908909extern unsigned int ib_ipath_lkey_table_size;910911extern unsigned int ib_ipath_max_cqes;912913extern unsigned int ib_ipath_max_cqs;914915extern unsigned int ib_ipath_max_qp_wrs;916917extern unsigned int ib_ipath_max_qps;918919extern unsigned int ib_ipath_max_sges;920921extern unsigned int ib_ipath_max_mcast_grps;922923extern unsigned int ib_ipath_max_mcast_qp_attached;924925extern unsigned int 
ib_ipath_max_srqs;926927extern unsigned int ib_ipath_max_srq_sges;928929extern unsigned int ib_ipath_max_srq_wrs;930931extern const u32 ib_ipath_rnr_table[];932933extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;934935#endif /* IPATH_VERBS_H */936937938