/* drivers/infiniband/hw/cxgb3/iwch_provider.c */
/*1* Copyright (c) 2006 Chelsio, Inc. All rights reserved.2*3* This software is available to you under a choice of one of two4* licenses. You may choose to be licensed under the terms of the GNU5* General Public License (GPL) Version 2, available from the file6* COPYING in the main directory of this source tree, or the7* OpenIB.org BSD license below:8*9* Redistribution and use in source and binary forms, with or10* without modification, are permitted provided that the following11* conditions are met:12*13* - Redistributions of source code must retain the above14* copyright notice, this list of conditions and the following15* disclaimer.16*17* - Redistributions in binary form must reproduce the above18* copyright notice, this list of conditions and the following19* disclaimer in the documentation and/or other materials20* provided with the distribution.21*22* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,23* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF24* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND25* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS26* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN27* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN28* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE29* SOFTWARE.30*/31#include <linux/module.h>32#include <linux/moduleparam.h>33#include <linux/device.h>34#include <linux/netdevice.h>35#include <linux/etherdevice.h>36#include <linux/delay.h>37#include <linux/errno.h>38#include <linux/list.h>39#include <linux/sched.h>40#include <linux/spinlock.h>41#include <linux/ethtool.h>42#include <linux/rtnetlink.h>43#include <linux/inetdevice.h>44#include <linux/slab.h>4546#include <asm/io.h>47#include <asm/irq.h>48#include <asm/byteorder.h>4950#include <rdma/iw_cm.h>51#include <rdma/ib_verbs.h>52#include <rdma/ib_smi.h>53#include <rdma/ib_umem.h>54#include <rdma/ib_user_verbs.h>5556#include "cxio_hal.h"57#include "iwch.h"58#include "iwch_provider.h"59#include "iwch_cm.h"60#include "iwch_user.h"61#include "common.h"6263static int iwch_modify_port(struct ib_device *ibdev,64u8 port, int port_modify_mask,65struct ib_port_modify *props)66{67return -ENOSYS;68}6970static struct ib_ah *iwch_ah_create(struct ib_pd *pd,71struct ib_ah_attr *ah_attr)72{73return ERR_PTR(-ENOSYS);74}7576static int iwch_ah_destroy(struct ib_ah *ah)77{78return -ENOSYS;79}8081static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)82{83return -ENOSYS;84}8586static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)87{88return -ENOSYS;89}9091static int iwch_process_mad(struct ib_device *ibdev,92int mad_flags,93u8 port_num,94struct ib_wc *in_wc,95struct ib_grh *in_grh,96struct ib_mad *in_mad, struct ib_mad *out_mad)97{98return -ENOSYS;99}100101static int iwch_dealloc_ucontext(struct ib_ucontext *context)102{103struct iwch_dev *rhp = to_iwch_dev(context->device);104struct iwch_ucontext *ucontext = to_iwch_ucontext(context);105struct iwch_mm_entry *mm, 
*tmp;106107PDBG("%s context %p\n", __func__, context);108list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)109kfree(mm);110cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);111kfree(ucontext);112return 0;113}114115static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,116struct ib_udata *udata)117{118struct iwch_ucontext *context;119struct iwch_dev *rhp = to_iwch_dev(ibdev);120121PDBG("%s ibdev %p\n", __func__, ibdev);122context = kzalloc(sizeof(*context), GFP_KERNEL);123if (!context)124return ERR_PTR(-ENOMEM);125cxio_init_ucontext(&rhp->rdev, &context->uctx);126INIT_LIST_HEAD(&context->mmaps);127spin_lock_init(&context->mmap_lock);128return &context->ibucontext;129}130131static int iwch_destroy_cq(struct ib_cq *ib_cq)132{133struct iwch_cq *chp;134135PDBG("%s ib_cq %p\n", __func__, ib_cq);136chp = to_iwch_cq(ib_cq);137138remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);139atomic_dec(&chp->refcnt);140wait_event(chp->wait, !atomic_read(&chp->refcnt));141142cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);143kfree(chp);144return 0;145}146147static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,148struct ib_ucontext *ib_context,149struct ib_udata *udata)150{151struct iwch_dev *rhp;152struct iwch_cq *chp;153struct iwch_create_cq_resp uresp;154struct iwch_create_cq_req ureq;155struct iwch_ucontext *ucontext = NULL;156static int warned;157size_t resplen;158159PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);160rhp = to_iwch_dev(ibdev);161chp = kzalloc(sizeof(*chp), GFP_KERNEL);162if (!chp)163return ERR_PTR(-ENOMEM);164165if (ib_context) {166ucontext = to_iwch_ucontext(ib_context);167if (!t3a_device(rhp)) {168if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {169kfree(chp);170return ERR_PTR(-EFAULT);171}172chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;173}174}175176if (t3a_device(rhp)) {177178/*179* T3A: Add some fluff to handle extra CQEs inserted180* for various errors.181* 
Additional CQE possibilities:182* TERMINATE,183* incoming RDMA WRITE Failures184* incoming RDMA READ REQUEST FAILUREs185* NOTE: We cannot ensure the CQ won't overflow.186*/187entries += 16;188}189entries = roundup_pow_of_two(entries);190chp->cq.size_log2 = ilog2(entries);191192if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {193kfree(chp);194return ERR_PTR(-ENOMEM);195}196chp->rhp = rhp;197chp->ibcq.cqe = 1 << chp->cq.size_log2;198spin_lock_init(&chp->lock);199atomic_set(&chp->refcnt, 1);200init_waitqueue_head(&chp->wait);201if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {202cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);203kfree(chp);204return ERR_PTR(-ENOMEM);205}206207if (ucontext) {208struct iwch_mm_entry *mm;209210mm = kmalloc(sizeof *mm, GFP_KERNEL);211if (!mm) {212iwch_destroy_cq(&chp->ibcq);213return ERR_PTR(-ENOMEM);214}215uresp.cqid = chp->cq.cqid;216uresp.size_log2 = chp->cq.size_log2;217spin_lock(&ucontext->mmap_lock);218uresp.key = ucontext->key;219ucontext->key += PAGE_SIZE;220spin_unlock(&ucontext->mmap_lock);221mm->key = uresp.key;222mm->addr = virt_to_phys(chp->cq.queue);223if (udata->outlen < sizeof uresp) {224if (!warned++)225printk(KERN_WARNING MOD "Warning - "226"downlevel libcxgb3 (non-fatal).\n");227mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *228sizeof(struct t3_cqe));229resplen = sizeof(struct iwch_create_cq_resp_v0);230} else {231mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *232sizeof(struct t3_cqe));233uresp.memsize = mm->len;234resplen = sizeof uresp;235}236if (ib_copy_to_udata(udata, &uresp, resplen)) {237kfree(mm);238iwch_destroy_cq(&chp->ibcq);239return ERR_PTR(-EFAULT);240}241insert_mmap(ucontext, mm);242}243PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",244chp->cq.cqid, chp, (1 << chp->cq.size_log2),245(unsigned long long) chp->cq.dma_addr);246return &chp->ibcq;247}248249static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)250{251#ifdef notyet252struct iwch_cq *chp = 
to_iwch_cq(cq);253struct t3_cq oldcq, newcq;254int ret;255256PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);257258/* We don't downsize... */259if (cqe <= cq->cqe)260return 0;261262/* create new t3_cq with new size */263cqe = roundup_pow_of_two(cqe+1);264newcq.size_log2 = ilog2(cqe);265266/* Dont allow resize to less than the current wce count */267if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {268return -ENOMEM;269}270271/* Quiesce all QPs using this CQ */272ret = iwch_quiesce_qps(chp);273if (ret) {274return ret;275}276277ret = cxio_create_cq(&chp->rhp->rdev, &newcq);278if (ret) {279return ret;280}281282/* copy CQEs */283memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *284sizeof(struct t3_cqe));285286/* old iwch_qp gets new t3_cq but keeps old cqid */287oldcq = chp->cq;288chp->cq = newcq;289chp->cq.cqid = oldcq.cqid;290291/* resize new t3_cq to update the HW context */292ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);293if (ret) {294chp->cq = oldcq;295return ret;296}297chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;298299/* destroy old t3_cq */300oldcq.cqid = newcq.cqid;301ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);302if (ret) {303printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",304__func__, ret);305}306307/* add user hooks here */308309/* resume qps */310ret = iwch_resume_qps(chp);311return ret;312#else313return -ENOSYS;314#endif315}316317static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)318{319struct iwch_dev *rhp;320struct iwch_cq *chp;321enum t3_cq_opcode cq_op;322int err;323unsigned long flag;324u32 rptr;325326chp = to_iwch_cq(ibcq);327rhp = chp->rhp;328if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)329cq_op = CQ_ARM_SE;330else331cq_op = CQ_ARM_AN;332if (chp->user_rptr_addr) {333if (get_user(rptr, chp->user_rptr_addr))334return -EFAULT;335spin_lock_irqsave(&chp->lock, flag);336chp->cq.rptr = rptr;337} else338spin_lock_irqsave(&chp->lock, flag);339PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);340err = 
cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);341spin_unlock_irqrestore(&chp->lock, flag);342if (err < 0)343printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,344chp->cq.cqid);345if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))346err = 0;347return err;348}349350static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)351{352int len = vma->vm_end - vma->vm_start;353u32 key = vma->vm_pgoff << PAGE_SHIFT;354struct cxio_rdev *rdev_p;355int ret = 0;356struct iwch_mm_entry *mm;357struct iwch_ucontext *ucontext;358u64 addr;359360PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,361key, len);362363if (vma->vm_start & (PAGE_SIZE-1)) {364return -EINVAL;365}366367rdev_p = &(to_iwch_dev(context->device)->rdev);368ucontext = to_iwch_ucontext(context);369370mm = remove_mmap(ucontext, key, len);371if (!mm)372return -EINVAL;373addr = mm->addr;374kfree(mm);375376if ((addr >= rdev_p->rnic_info.udbell_physbase) &&377(addr < (rdev_p->rnic_info.udbell_physbase +378rdev_p->rnic_info.udbell_len))) {379380/*381* Map T3 DB register.382*/383if (vma->vm_flags & VM_READ) {384return -EPERM;385}386387vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);388vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;389vma->vm_flags &= ~VM_MAYREAD;390ret = io_remap_pfn_range(vma, vma->vm_start,391addr >> PAGE_SHIFT,392len, vma->vm_page_prot);393} else {394395/*396* Map WQ or CQ contig dma memory...397*/398ret = remap_pfn_range(vma, vma->vm_start,399addr >> PAGE_SHIFT,400len, vma->vm_page_prot);401}402403return ret;404}405406static int iwch_deallocate_pd(struct ib_pd *pd)407{408struct iwch_dev *rhp;409struct iwch_pd *php;410411php = to_iwch_pd(pd);412rhp = php->rhp;413PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);414cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);415kfree(php);416return 0;417}418419static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,420struct ib_ucontext *context,421struct ib_udata *udata)422{423struct iwch_pd *php;424u32 
pdid;425struct iwch_dev *rhp;426427PDBG("%s ibdev %p\n", __func__, ibdev);428rhp = (struct iwch_dev *) ibdev;429pdid = cxio_hal_get_pdid(rhp->rdev.rscp);430if (!pdid)431return ERR_PTR(-EINVAL);432php = kzalloc(sizeof(*php), GFP_KERNEL);433if (!php) {434cxio_hal_put_pdid(rhp->rdev.rscp, pdid);435return ERR_PTR(-ENOMEM);436}437php->pdid = pdid;438php->rhp = rhp;439if (context) {440if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {441iwch_deallocate_pd(&php->ibpd);442return ERR_PTR(-EFAULT);443}444}445PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);446return &php->ibpd;447}448449static int iwch_dereg_mr(struct ib_mr *ib_mr)450{451struct iwch_dev *rhp;452struct iwch_mr *mhp;453u32 mmid;454455PDBG("%s ib_mr %p\n", __func__, ib_mr);456/* There can be no memory windows */457if (atomic_read(&ib_mr->usecnt))458return -EINVAL;459460mhp = to_iwch_mr(ib_mr);461rhp = mhp->rhp;462mmid = mhp->attr.stag >> 8;463cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,464mhp->attr.pbl_addr);465iwch_free_pbl(mhp);466remove_handle(rhp, &rhp->mmidr, mmid);467if (mhp->kva)468kfree((void *) (unsigned long) mhp->kva);469if (mhp->umem)470ib_umem_release(mhp->umem);471PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);472kfree(mhp);473return 0;474}475476static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,477struct ib_phys_buf *buffer_list,478int num_phys_buf,479int acc,480u64 *iova_start)481{482__be64 *page_list;483int shift;484u64 total_size;485int npages;486struct iwch_dev *rhp;487struct iwch_pd *php;488struct iwch_mr *mhp;489int ret;490491PDBG("%s ib_pd %p\n", __func__, pd);492php = to_iwch_pd(pd);493rhp = php->rhp;494495mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);496if (!mhp)497return ERR_PTR(-ENOMEM);498499mhp->rhp = rhp;500501/* First check that we have enough alignment */502if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {503ret = -EINVAL;504goto err;505}506507if (num_phys_buf > 1 &&508((buffer_list[0].addr + buffer_list[0].size) & 
~PAGE_MASK)) {509ret = -EINVAL;510goto err;511}512513ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,514&total_size, &npages, &shift, &page_list);515if (ret)516goto err;517518ret = iwch_alloc_pbl(mhp, npages);519if (ret) {520kfree(page_list);521goto err_pbl;522}523524ret = iwch_write_pbl(mhp, page_list, npages, 0);525kfree(page_list);526if (ret)527goto err_pbl;528529mhp->attr.pdid = php->pdid;530mhp->attr.zbva = 0;531532mhp->attr.perms = iwch_ib_to_tpt_access(acc);533mhp->attr.va_fbo = *iova_start;534mhp->attr.page_size = shift - 12;535536mhp->attr.len = (u32) total_size;537mhp->attr.pbl_size = npages;538ret = iwch_register_mem(rhp, php, mhp, shift);539if (ret)540goto err_pbl;541542return &mhp->ibmr;543544err_pbl:545iwch_free_pbl(mhp);546547err:548kfree(mhp);549return ERR_PTR(ret);550551}552553static int iwch_reregister_phys_mem(struct ib_mr *mr,554int mr_rereg_mask,555struct ib_pd *pd,556struct ib_phys_buf *buffer_list,557int num_phys_buf,558int acc, u64 * iova_start)559{560561struct iwch_mr mh, *mhp;562struct iwch_pd *php;563struct iwch_dev *rhp;564__be64 *page_list = NULL;565int shift = 0;566u64 total_size;567int npages;568int ret;569570PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);571572/* There can be no memory windows */573if (atomic_read(&mr->usecnt))574return -EINVAL;575576mhp = to_iwch_mr(mr);577rhp = mhp->rhp;578php = to_iwch_pd(mr->pd);579580/* make sure we are on the same adapter */581if (rhp != php->rhp)582return -EINVAL;583584memcpy(&mh, mhp, sizeof *mhp);585586if (mr_rereg_mask & IB_MR_REREG_PD)587php = to_iwch_pd(pd);588if (mr_rereg_mask & IB_MR_REREG_ACCESS)589mh.attr.perms = iwch_ib_to_tpt_access(acc);590if (mr_rereg_mask & IB_MR_REREG_TRANS) {591ret = build_phys_page_list(buffer_list, num_phys_buf,592iova_start,593&total_size, &npages,594&shift, &page_list);595if (ret)596return ret;597}598599ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);600kfree(page_list);601if (ret) {602return ret;603}604if (mr_rereg_mask & 
IB_MR_REREG_PD)605mhp->attr.pdid = php->pdid;606if (mr_rereg_mask & IB_MR_REREG_ACCESS)607mhp->attr.perms = iwch_ib_to_tpt_access(acc);608if (mr_rereg_mask & IB_MR_REREG_TRANS) {609mhp->attr.zbva = 0;610mhp->attr.va_fbo = *iova_start;611mhp->attr.page_size = shift - 12;612mhp->attr.len = (u32) total_size;613mhp->attr.pbl_size = npages;614}615616return 0;617}618619620static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,621u64 virt, int acc, struct ib_udata *udata)622{623__be64 *pages;624int shift, n, len;625int i, j, k;626int err = 0;627struct ib_umem_chunk *chunk;628struct iwch_dev *rhp;629struct iwch_pd *php;630struct iwch_mr *mhp;631struct iwch_reg_user_mr_resp uresp;632633PDBG("%s ib_pd %p\n", __func__, pd);634635php = to_iwch_pd(pd);636rhp = php->rhp;637mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);638if (!mhp)639return ERR_PTR(-ENOMEM);640641mhp->rhp = rhp;642643mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);644if (IS_ERR(mhp->umem)) {645err = PTR_ERR(mhp->umem);646kfree(mhp);647return ERR_PTR(err);648}649650shift = ffs(mhp->umem->page_size) - 1;651652n = 0;653list_for_each_entry(chunk, &mhp->umem->chunk_list, list)654n += chunk->nents;655656err = iwch_alloc_pbl(mhp, n);657if (err)658goto err;659660pages = (__be64 *) __get_free_page(GFP_KERNEL);661if (!pages) {662err = -ENOMEM;663goto err_pbl;664}665666i = n = 0;667668list_for_each_entry(chunk, &mhp->umem->chunk_list, list)669for (j = 0; j < chunk->nmap; ++j) {670len = sg_dma_len(&chunk->page_list[j]) >> shift;671for (k = 0; k < len; ++k) {672pages[i++] = cpu_to_be64(sg_dma_address(673&chunk->page_list[j]) +674mhp->umem->page_size * k);675if (i == PAGE_SIZE / sizeof *pages) {676err = iwch_write_pbl(mhp, pages, i, n);677if (err)678goto pbl_done;679n += i;680i = 0;681}682}683}684685if (i)686err = iwch_write_pbl(mhp, pages, i, n);687688pbl_done:689free_page((unsigned long) pages);690if (err)691goto err_pbl;692693mhp->attr.pdid = php->pdid;694mhp->attr.zbva = 
0;695mhp->attr.perms = iwch_ib_to_tpt_access(acc);696mhp->attr.va_fbo = virt;697mhp->attr.page_size = shift - 12;698mhp->attr.len = (u32) length;699700err = iwch_register_mem(rhp, php, mhp, shift);701if (err)702goto err_pbl;703704if (udata && !t3a_device(rhp)) {705uresp.pbl_addr = (mhp->attr.pbl_addr -706rhp->rdev.rnic_info.pbl_base) >> 3;707PDBG("%s user resp pbl_addr 0x%x\n", __func__,708uresp.pbl_addr);709710if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {711iwch_dereg_mr(&mhp->ibmr);712err = -EFAULT;713goto err;714}715}716717return &mhp->ibmr;718719err_pbl:720iwch_free_pbl(mhp);721722err:723ib_umem_release(mhp->umem);724kfree(mhp);725return ERR_PTR(err);726}727728static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)729{730struct ib_phys_buf bl;731u64 kva;732struct ib_mr *ibmr;733734PDBG("%s ib_pd %p\n", __func__, pd);735736/*737* T3 only supports 32 bits of size.738*/739bl.size = 0xffffffff;740bl.addr = 0;741kva = 0;742ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);743return ibmr;744}745746static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)747{748struct iwch_dev *rhp;749struct iwch_pd *php;750struct iwch_mw *mhp;751u32 mmid;752u32 stag = 0;753int ret;754755php = to_iwch_pd(pd);756rhp = php->rhp;757mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);758if (!mhp)759return ERR_PTR(-ENOMEM);760ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);761if (ret) {762kfree(mhp);763return ERR_PTR(ret);764}765mhp->rhp = rhp;766mhp->attr.pdid = php->pdid;767mhp->attr.type = TPT_MW;768mhp->attr.stag = stag;769mmid = (stag) >> 8;770mhp->ibmw.rkey = stag;771if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {772cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);773kfree(mhp);774return ERR_PTR(-ENOMEM);775}776PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);777return &(mhp->ibmw);778}779780static int iwch_dealloc_mw(struct ib_mw *mw)781{782struct iwch_dev *rhp;783struct iwch_mw *mhp;784u32 mmid;785786mhp = to_iwch_mw(mw);787rhp = mhp->rhp;788mmid = 
(mw->rkey) >> 8;789cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);790remove_handle(rhp, &rhp->mmidr, mmid);791kfree(mhp);792PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);793return 0;794}795796static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)797{798struct iwch_dev *rhp;799struct iwch_pd *php;800struct iwch_mr *mhp;801u32 mmid;802u32 stag = 0;803int ret = 0;804805php = to_iwch_pd(pd);806rhp = php->rhp;807mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);808if (!mhp)809goto err;810811mhp->rhp = rhp;812ret = iwch_alloc_pbl(mhp, pbl_depth);813if (ret)814goto err1;815mhp->attr.pbl_size = pbl_depth;816ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,817mhp->attr.pbl_size, mhp->attr.pbl_addr);818if (ret)819goto err2;820mhp->attr.pdid = php->pdid;821mhp->attr.type = TPT_NON_SHARED_MR;822mhp->attr.stag = stag;823mhp->attr.state = 1;824mmid = (stag) >> 8;825mhp->ibmr.rkey = mhp->ibmr.lkey = stag;826if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))827goto err3;828829PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);830return &(mhp->ibmr);831err3:832cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,833mhp->attr.pbl_addr);834err2:835iwch_free_pbl(mhp);836err1:837kfree(mhp);838err:839return ERR_PTR(ret);840}841842static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(843struct ib_device *device,844int page_list_len)845{846struct ib_fast_reg_page_list *page_list;847848page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),849GFP_KERNEL);850if (!page_list)851return ERR_PTR(-ENOMEM);852853page_list->page_list = (u64 *)(page_list + 1);854page_list->max_page_list_len = page_list_len;855856return page_list;857}858859static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)860{861kfree(page_list);862}863864static int iwch_destroy_qp(struct ib_qp *ib_qp)865{866struct iwch_dev *rhp;867struct iwch_qp *qhp;868struct iwch_qp_attributes attrs;869struct iwch_ucontext *ucontext;870871qhp = 
to_iwch_qp(ib_qp);872rhp = qhp->rhp;873874attrs.next_state = IWCH_QP_STATE_ERROR;875iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);876wait_event(qhp->wait, !qhp->ep);877878remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);879880atomic_dec(&qhp->refcnt);881wait_event(qhp->wait, !atomic_read(&qhp->refcnt));882883ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)884: NULL;885cxio_destroy_qp(&rhp->rdev, &qhp->wq,886ucontext ? &ucontext->uctx : &rhp->rdev.uctx);887888PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,889ib_qp, qhp->wq.qpid, qhp);890kfree(qhp);891return 0;892}893894static struct ib_qp *iwch_create_qp(struct ib_pd *pd,895struct ib_qp_init_attr *attrs,896struct ib_udata *udata)897{898struct iwch_dev *rhp;899struct iwch_qp *qhp;900struct iwch_pd *php;901struct iwch_cq *schp;902struct iwch_cq *rchp;903struct iwch_create_qp_resp uresp;904int wqsize, sqsize, rqsize;905struct iwch_ucontext *ucontext;906907PDBG("%s ib_pd %p\n", __func__, pd);908if (attrs->qp_type != IB_QPT_RC)909return ERR_PTR(-EINVAL);910php = to_iwch_pd(pd);911rhp = php->rhp;912schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);913rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);914if (!schp || !rchp)915return ERR_PTR(-EINVAL);916917/* The RQT size must be # of entries + 1 rounded up to a power of two */918rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);919if (rqsize == attrs->cap.max_recv_wr)920rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);921922/* T3 doesn't support RQT depth < 16 */923if (rqsize < 16)924rqsize = 16;925926if (rqsize > T3_MAX_RQ_SIZE)927return ERR_PTR(-EINVAL);928929if (attrs->cap.max_inline_data > T3_MAX_INLINE)930return ERR_PTR(-EINVAL);931932/*933* NOTE: The SQ and total WQ sizes don't need to be934* a power of two. However, all the code assumes935* they are. 
EG: Q_FREECNT() and friends.936*/937sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);938wqsize = roundup_pow_of_two(rqsize + sqsize);939940/*941* Kernel users need more wq space for fastreg WRs which can take942* 2 WR fragments.943*/944ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;945if (!ucontext && wqsize < (rqsize + (2 * sqsize)))946wqsize = roundup_pow_of_two(rqsize +947roundup_pow_of_two(attrs->cap.max_send_wr * 2));948PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,949wqsize, sqsize, rqsize);950qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);951if (!qhp)952return ERR_PTR(-ENOMEM);953qhp->wq.size_log2 = ilog2(wqsize);954qhp->wq.rq_size_log2 = ilog2(rqsize);955qhp->wq.sq_size_log2 = ilog2(sqsize);956if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,957ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {958kfree(qhp);959return ERR_PTR(-ENOMEM);960}961962attrs->cap.max_recv_wr = rqsize - 1;963attrs->cap.max_send_wr = sqsize;964attrs->cap.max_inline_data = T3_MAX_INLINE;965966qhp->rhp = rhp;967qhp->attr.pd = php->pdid;968qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;969qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;970qhp->attr.sq_num_entries = attrs->cap.max_send_wr;971qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;972qhp->attr.sq_max_sges = attrs->cap.max_send_sge;973qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;974qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;975qhp->attr.state = IWCH_QP_STATE_IDLE;976qhp->attr.next_state = IWCH_QP_STATE_IDLE;977978/*979* XXX - These don't get passed in from the openib user980* at create time. The CM sets them via a QP modify.981* Need to fix... 
I think the CM should982*/983qhp->attr.enable_rdma_read = 1;984qhp->attr.enable_rdma_write = 1;985qhp->attr.enable_bind = 1;986qhp->attr.max_ord = 1;987qhp->attr.max_ird = 1;988989spin_lock_init(&qhp->lock);990init_waitqueue_head(&qhp->wait);991atomic_set(&qhp->refcnt, 1);992993if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {994cxio_destroy_qp(&rhp->rdev, &qhp->wq,995ucontext ? &ucontext->uctx : &rhp->rdev.uctx);996kfree(qhp);997return ERR_PTR(-ENOMEM);998}9991000if (udata) {10011002struct iwch_mm_entry *mm1, *mm2;10031004mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);1005if (!mm1) {1006iwch_destroy_qp(&qhp->ibqp);1007return ERR_PTR(-ENOMEM);1008}10091010mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);1011if (!mm2) {1012kfree(mm1);1013iwch_destroy_qp(&qhp->ibqp);1014return ERR_PTR(-ENOMEM);1015}10161017uresp.qpid = qhp->wq.qpid;1018uresp.size_log2 = qhp->wq.size_log2;1019uresp.sq_size_log2 = qhp->wq.sq_size_log2;1020uresp.rq_size_log2 = qhp->wq.rq_size_log2;1021spin_lock(&ucontext->mmap_lock);1022uresp.key = ucontext->key;1023ucontext->key += PAGE_SIZE;1024uresp.db_key = ucontext->key;1025ucontext->key += PAGE_SIZE;1026spin_unlock(&ucontext->mmap_lock);1027if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {1028kfree(mm1);1029kfree(mm2);1030iwch_destroy_qp(&qhp->ibqp);1031return ERR_PTR(-EFAULT);1032}1033mm1->key = uresp.key;1034mm1->addr = virt_to_phys(qhp->wq.queue);1035mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));1036insert_mmap(ucontext, mm1);1037mm2->key = uresp.db_key;1038mm2->addr = qhp->wq.udb & PAGE_MASK;1039mm2->len = PAGE_SIZE;1040insert_mmap(ucontext, mm2);1041}1042qhp->ibqp.qp_num = qhp->wq.qpid;1043init_timer(&(qhp->timer));1044PDBG("%s sq_num_entries %d, rq_num_entries %d "1045"qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",1046__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,1047qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,10481 << qhp->wq.size_log2, qhp->wq.rq_addr);1049return &qhp->ibqp;1050}10511052static int 
iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,1053int attr_mask, struct ib_udata *udata)1054{1055struct iwch_dev *rhp;1056struct iwch_qp *qhp;1057enum iwch_qp_attr_mask mask = 0;1058struct iwch_qp_attributes attrs;10591060PDBG("%s ib_qp %p\n", __func__, ibqp);10611062/* iwarp does not support the RTR state */1063if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))1064attr_mask &= ~IB_QP_STATE;10651066/* Make sure we still have something left to do */1067if (!attr_mask)1068return 0;10691070memset(&attrs, 0, sizeof attrs);1071qhp = to_iwch_qp(ibqp);1072rhp = qhp->rhp;10731074attrs.next_state = iwch_convert_state(attr->qp_state);1075attrs.enable_rdma_read = (attr->qp_access_flags &1076IB_ACCESS_REMOTE_READ) ? 1 : 0;1077attrs.enable_rdma_write = (attr->qp_access_flags &1078IB_ACCESS_REMOTE_WRITE) ? 1 : 0;1079attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;108010811082mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;1083mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?1084(IWCH_QP_ATTR_ENABLE_RDMA_READ |1085IWCH_QP_ATTR_ENABLE_RDMA_WRITE |1086IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;10871088return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);1089}10901091void iwch_qp_add_ref(struct ib_qp *qp)1092{1093PDBG("%s ib_qp %p\n", __func__, qp);1094atomic_inc(&(to_iwch_qp(qp)->refcnt));1095}10961097void iwch_qp_rem_ref(struct ib_qp *qp)1098{1099PDBG("%s ib_qp %p\n", __func__, qp);1100if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))1101wake_up(&(to_iwch_qp(qp)->wait));1102}11031104static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)1105{1106PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);1107return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);1108}110911101111static int iwch_query_pkey(struct ib_device *ibdev,1112u8 port, u16 index, u16 * pkey)1113{1114PDBG("%s ibdev %p\n", __func__, ibdev);1115*pkey = 0;1116return 0;1117}11181119static int iwch_query_gid(struct ib_device *ibdev, u8 port,1120int index, union 
ib_gid *gid)1121{1122struct iwch_dev *dev;11231124PDBG("%s ibdev %p, port %d, index %d, gid %p\n",1125__func__, ibdev, port, index, gid);1126dev = to_iwch_dev(ibdev);1127BUG_ON(port == 0 || port > 2);1128memset(&(gid->raw[0]), 0, sizeof(gid->raw));1129memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);1130return 0;1131}11321133static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)1134{1135struct ethtool_drvinfo info;1136struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;1137char *cp, *next;1138unsigned fw_maj, fw_min, fw_mic;11391140lldev->ethtool_ops->get_drvinfo(lldev, &info);11411142next = info.fw_version + 1;1143cp = strsep(&next, ".");1144sscanf(cp, "%i", &fw_maj);1145cp = strsep(&next, ".");1146sscanf(cp, "%i", &fw_min);1147cp = strsep(&next, ".");1148sscanf(cp, "%i", &fw_mic);11491150return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |1151(fw_mic & 0xffff);1152}11531154static int iwch_query_device(struct ib_device *ibdev,1155struct ib_device_attr *props)1156{11571158struct iwch_dev *dev;1159PDBG("%s ibdev %p\n", __func__, ibdev);11601161dev = to_iwch_dev(ibdev);1162memset(props, 0, sizeof *props);1163memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);1164props->hw_ver = dev->rdev.t3cdev_p->type;1165props->fw_ver = fw_vers_string_to_u64(dev);1166props->device_cap_flags = dev->device_cap_flags;1167props->page_size_cap = dev->attr.mem_pgsizes_bitmask;1168props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;1169props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;1170props->max_mr_size = dev->attr.max_mr_size;1171props->max_qp = dev->attr.max_qps;1172props->max_qp_wr = dev->attr.max_wrs;1173props->max_sge = dev->attr.max_sge_per_wr;1174props->max_sge_rd = 1;1175props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;1176props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;1177props->max_cq = dev->attr.max_cqs;1178props->max_cqe = dev->attr.max_cqes_per_cq;1179props->max_mr = 
dev->attr.max_mem_regs;1180props->max_pd = dev->attr.max_pds;1181props->local_ca_ack_delay = 0;1182props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;11831184return 0;1185}11861187static int iwch_query_port(struct ib_device *ibdev,1188u8 port, struct ib_port_attr *props)1189{1190struct iwch_dev *dev;1191struct net_device *netdev;1192struct in_device *inetdev;11931194PDBG("%s ibdev %p\n", __func__, ibdev);11951196dev = to_iwch_dev(ibdev);1197netdev = dev->rdev.port_info.lldevs[port-1];11981199memset(props, 0, sizeof(struct ib_port_attr));1200props->max_mtu = IB_MTU_4096;1201if (netdev->mtu >= 4096)1202props->active_mtu = IB_MTU_4096;1203else if (netdev->mtu >= 2048)1204props->active_mtu = IB_MTU_2048;1205else if (netdev->mtu >= 1024)1206props->active_mtu = IB_MTU_1024;1207else if (netdev->mtu >= 512)1208props->active_mtu = IB_MTU_512;1209else1210props->active_mtu = IB_MTU_256;12111212if (!netif_carrier_ok(netdev))1213props->state = IB_PORT_DOWN;1214else {1215inetdev = in_dev_get(netdev);1216if (inetdev) {1217if (inetdev->ifa_list)1218props->state = IB_PORT_ACTIVE;1219else1220props->state = IB_PORT_INIT;1221in_dev_put(inetdev);1222} else1223props->state = IB_PORT_INIT;1224}12251226props->port_cap_flags =1227IB_PORT_CM_SUP |1228IB_PORT_SNMP_TUNNEL_SUP |1229IB_PORT_REINIT_SUP |1230IB_PORT_DEVICE_MGMT_SUP |1231IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;1232props->gid_tbl_len = 1;1233props->pkey_tbl_len = 1;1234props->active_width = 2;1235props->active_speed = 2;1236props->max_msg_sz = -1;12371238return 0;1239}12401241static ssize_t show_rev(struct device *dev, struct device_attribute *attr,1242char *buf)1243{1244struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,1245ibdev.dev);1246PDBG("%s dev 0x%p\n", __func__, dev);1247return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);1248}12491250static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)1251{1252struct iwch_dev *iwch_dev = container_of(dev, struct 
iwch_dev,1253ibdev.dev);1254struct ethtool_drvinfo info;1255struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;12561257PDBG("%s dev 0x%p\n", __func__, dev);1258lldev->ethtool_ops->get_drvinfo(lldev, &info);1259return sprintf(buf, "%s\n", info.fw_version);1260}12611262static ssize_t show_hca(struct device *dev, struct device_attribute *attr,1263char *buf)1264{1265struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,1266ibdev.dev);1267struct ethtool_drvinfo info;1268struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;12691270PDBG("%s dev 0x%p\n", __func__, dev);1271lldev->ethtool_ops->get_drvinfo(lldev, &info);1272return sprintf(buf, "%s\n", info.driver);1273}12741275static ssize_t show_board(struct device *dev, struct device_attribute *attr,1276char *buf)1277{1278struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,1279ibdev.dev);1280PDBG("%s dev 0x%p\n", __func__, dev);1281return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,1282iwch_dev->rdev.rnic_info.pdev->device);1283}12841285static int iwch_get_mib(struct ib_device *ibdev,1286union rdma_protocol_stats *stats)1287{1288struct iwch_dev *dev;1289struct tp_mib_stats m;1290int ret;12911292PDBG("%s ibdev %p\n", __func__, ibdev);1293dev = to_iwch_dev(ibdev);1294ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);1295if (ret)1296return -ENOSYS;12971298memset(stats, 0, sizeof *stats);1299stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +1300m.ipInReceive_lo;1301stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +1302m.ipInHdrErrors_lo;1303stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +1304m.ipInAddrErrors_lo;1305stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +1306m.ipInUnknownProtos_lo;1307stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +1308m.ipInDiscards_lo;1309stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +1310m.ipInDelivers_lo;1311stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 
32) +1312m.ipOutRequests_lo;1313stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +1314m.ipOutDiscards_lo;1315stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +1316m.ipOutNoRoutes_lo;1317stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;1318stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;1319stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;1320stats->iw.ipReasmFails = (u64) m.ipReasmFails;1321stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;1322stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;1323stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;1324stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;1325stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;1326stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;1327stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +1328m.tcpInSegs_lo;1329stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +1330m.tcpOutSegs_lo;1331stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +1332m.tcpRetransSeg_lo;1333stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +1334m.tcpInErrs_lo;1335stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;1336stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;1337return 0;1338}13391340static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);1341static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);1342static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);1343static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);13441345static struct device_attribute *iwch_class_attributes[] = {1346&dev_attr_hw_rev,1347&dev_attr_fw_ver,1348&dev_attr_hca_type,1349&dev_attr_board_id,1350};13511352int iwch_register_device(struct iwch_dev *dev)1353{1354int ret;1355int i;13561357PDBG("%s iwch_dev %p\n", __func__, dev);1358strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);1359memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));1360memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);1361dev->ibdev.owner = THIS_MODULE;1362dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |1363IB_DEVICE_MEM_WINDOW 
|1364IB_DEVICE_MEM_MGT_EXTENSIONS;13651366/* cxgb3 supports STag 0. */1367dev->ibdev.local_dma_lkey = 0;13681369dev->ibdev.uverbs_cmd_mask =1370(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |1371(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |1372(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |1373(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |1374(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |1375(1ull << IB_USER_VERBS_CMD_REG_MR) |1376(1ull << IB_USER_VERBS_CMD_DEREG_MR) |1377(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |1378(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |1379(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |1380(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |1381(1ull << IB_USER_VERBS_CMD_CREATE_QP) |1382(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |1383(1ull << IB_USER_VERBS_CMD_POLL_CQ) |1384(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |1385(1ull << IB_USER_VERBS_CMD_POST_SEND) |1386(1ull << IB_USER_VERBS_CMD_POST_RECV);1387dev->ibdev.node_type = RDMA_NODE_RNIC;1388memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));1389dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;1390dev->ibdev.num_comp_vectors = 1;1391dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);1392dev->ibdev.query_device = iwch_query_device;1393dev->ibdev.query_port = iwch_query_port;1394dev->ibdev.modify_port = iwch_modify_port;1395dev->ibdev.query_pkey = iwch_query_pkey;1396dev->ibdev.query_gid = iwch_query_gid;1397dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;1398dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;1399dev->ibdev.mmap = iwch_mmap;1400dev->ibdev.alloc_pd = iwch_allocate_pd;1401dev->ibdev.dealloc_pd = iwch_deallocate_pd;1402dev->ibdev.create_ah = iwch_ah_create;1403dev->ibdev.destroy_ah = iwch_ah_destroy;1404dev->ibdev.create_qp = iwch_create_qp;1405dev->ibdev.modify_qp = iwch_ib_modify_qp;1406dev->ibdev.destroy_qp = iwch_destroy_qp;1407dev->ibdev.create_cq = iwch_create_cq;1408dev->ibdev.destroy_cq = iwch_destroy_cq;1409dev->ibdev.resize_cq = iwch_resize_cq;1410dev->ibdev.poll_cq = 
iwch_poll_cq;1411dev->ibdev.get_dma_mr = iwch_get_dma_mr;1412dev->ibdev.reg_phys_mr = iwch_register_phys_mem;1413dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;1414dev->ibdev.reg_user_mr = iwch_reg_user_mr;1415dev->ibdev.dereg_mr = iwch_dereg_mr;1416dev->ibdev.alloc_mw = iwch_alloc_mw;1417dev->ibdev.bind_mw = iwch_bind_mw;1418dev->ibdev.dealloc_mw = iwch_dealloc_mw;1419dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;1420dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;1421dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;1422dev->ibdev.attach_mcast = iwch_multicast_attach;1423dev->ibdev.detach_mcast = iwch_multicast_detach;1424dev->ibdev.process_mad = iwch_process_mad;1425dev->ibdev.req_notify_cq = iwch_arm_cq;1426dev->ibdev.post_send = iwch_post_send;1427dev->ibdev.post_recv = iwch_post_receive;1428dev->ibdev.get_protocol_stats = iwch_get_mib;1429dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;14301431dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);1432if (!dev->ibdev.iwcm)1433return -ENOMEM;14341435dev->ibdev.iwcm->connect = iwch_connect;1436dev->ibdev.iwcm->accept = iwch_accept_cr;1437dev->ibdev.iwcm->reject = iwch_reject_cr;1438dev->ibdev.iwcm->create_listen = iwch_create_listen;1439dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;1440dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;1441dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;1442dev->ibdev.iwcm->get_qp = iwch_get_qp;14431444ret = ib_register_device(&dev->ibdev, NULL);1445if (ret)1446goto bail1;14471448for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {1449ret = device_create_file(&dev->ibdev.dev,1450iwch_class_attributes[i]);1451if (ret) {1452goto bail2;1453}1454}1455return 0;1456bail2:1457ib_unregister_device(&dev->ibdev);1458bail1:1459kfree(dev->ibdev.iwcm);1460return ret;1461}14621463void iwch_unregister_device(struct iwch_dev *dev)1464{1465int i;14661467PDBG("%s iwch_dev %p\n", __func__, dev);1468for (i = 0; i < 
ARRAY_SIZE(iwch_class_attributes); ++i)1469device_remove_file(&dev->ibdev.dev,1470iwch_class_attributes[i]);1471ib_unregister_device(&dev->ibdev);1472kfree(dev->ibdev.iwcm);1473return;1474}147514761477