Path: drivers/infiniband/hw/mthca/mthca_eq.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_NUM_SPARE_EQE = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 tavor_pd;	/* reserved for Arbel */
	u8     reserved1[3];
	u8     intr;
	__be32 arbel_pd;	/* lost_count for Tavor */
	__be32 lkey;
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

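/*
 * Descriptive note: the masks below select which of the event types
 * above are delivered on the async, SRQ and command EQs; SRQ events
 * are only requested when the device supports SRQs (see async_mask()
 * below).
 */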
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32 qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32 srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)

static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

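/*
 * Descriptive note: on mem-free (Arbel) HCAs the consumer index is
 * written to a per-EQ memory-mapped register rather than through the
 * UAR doorbell, so no doorbell lock is needed here.
 */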
static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}

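/*
 * Descriptive note: poll one EQ, processing entries until
 * next_eqe_sw() finds one whose owner bit still belongs to the HCA;
 * each consumed EQE is handed back to hardware and the consumer
 * index advanced.
 */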
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on caller to set consumer index so that we don't have
	 * to test hca_type in our interrupt handling fast path.
	 */
	return eqes_found;
}

static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

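/*
 * Descriptive note: allocate and initialize one EQ.  The entry ring
 * is built from individually DMA-mapped pages covered by a physical
 * MR, every entry is handed to hardware, and the EQ context is passed
 * to firmware with SW2HW_EQ.
 */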
static int mthca_create_eq(struct mthca_dev *dev,
			   int nent,
			   u8 intr,
			   struct mthca_eq *eq)
{
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
	u8 status;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		dma_unmap_addr_set(&eq->page_list[i], mapping, t);

		clear_page(eq->page_list[i].buf);
	}

	for (i = 0; i < eq->nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
					MTHCA_EQ_OWNER_HW    |
					MTHCA_EQ_STATE_ARMED |
					MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr = intr;
	eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mr;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);

	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, eq->nent);

	return err;

 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  dma_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

 err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

 err_out:
	return err;
}

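/*
 * Descriptive note: tear down one EQ.  Hand it back from hardware
 * with HW2SW_EQ, then free its MR and the DMA pages backing the
 * entry ring.
 */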
static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
	if (status)
		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    dma_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq) {
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
			dev->eq_table.eq[i].have_irq = 0;
		}
}

static int mthca_map_reg(struct mthca_dev *dev,
			 unsigned long offset, unsigned long size,
			 void __iomem **map)
{
	phys_addr_t base = pci_resource_start(dev->pdev, 0);

	*map = ioremap(base + offset, size);
	if (!*map)
		return -ENOMEM;

	return 0;
}

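/*
 * Descriptive note: map the interrupt clear register plus, depending
 * on HCA family, either the Arbel EQ arm/set-CI registers or the
 * Tavor ECR registers, all of which live in BAR 0.
 */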
static int mthca_map_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		/*
		 * We assume that the EQ arm and EQ set CI registers
		 * fall within the first BAR.  We can't trust the
		 * values firmware gives us, since those addresses are
		 * valid on the HCA's side of the PCI bus but not
		 * necessarily the host side.
		 */
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Add 4 because we limit ourselves to EQs 0 ... 31,
		 * so we only need the low word of the register.
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			iounmap(dev->eq_regs.arbel.eq_arm);
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		iounmap(dev->eq_regs.arbel.eq_set_ci_base);
		iounmap(dev->eq_regs.arbel.eq_arm);
		iounmap(dev->clr_base);
	} else {
		iounmap(dev->eq_regs.tavor.ecr_base);
		iounmap(dev->clr_base);
	}
}

int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;
	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	u8 status;

	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

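/*
 * Descriptive note: set up the EQ table.  Allocate the EQN allocator,
 * map the EQ registers, create the completion, async and command EQs,
 * hook up the MSI-X or shared INTx interrupt handler(s), and tell
 * firmware which EQs receive async and command events via MAP_EQ.
 */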
int mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	intr = dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			snprintf(dev->eq_table.eq[i].irq_name,
				 IB_DEVICE_NAME_MAX,
				 "%s@pci:%s", eq_name[i],
				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, dev->eq_table.eq[i].irq_name,
					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}