Path: drivers/infiniband/hw/qib/qib_sdma.c
/*
 * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
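
/*
 * Layout sketch of one 16-byte descriptor as assembled by make_sdma_desc()
 * below.  This is derived from this file only (the flag #defines above and
 * the field comments in make_sdma_desc()); the chip documentation remains
 * the authoritative reference.
 *
 *   qw[0]  bits 10:0   SDmaBufOffset[12:2] (dword offset into the buffer)
 *          bits 15:11  flags: LAST, FIRST, DMA_HEAD, USE_LARGE_BUF, INTR
 *          bits 26:16  SDmaDwordCount[10:0]
 *          bits 31:30  SDmaGeneration[1:0]
 *          bits 63:32  SDmaPhyAddr[31:0] (low two bits always zero)
 *   qw[1]              SDmaPhyAddr[47:32] (addr >> 32)
 */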

char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle]             = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
	[qib_sdma_state_s99_running]          = "s99_Running",
};

char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
	[qib_sdma_event_e30_go_running]   = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
};

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);

static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
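
/*
 * Note on the reference counting above (as used in this file):
 * qib_setup_sdma() does kref_init()/init_completion(), the state machine
 * takes its own reference via sdma_get() when leaving s00_hw_down, and
 * sdma_sw_tear_down() drops that reference on the way back down.
 * qib_teardown_sdma() then calls sdma_finalput() to drop the initial
 * reference and wait for the completion, i.e. for the state machine to
 * have fully stopped.
 */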

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync count of added and removed.  It is VERY important that
	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers will be reset when switching states
	 * due to calling __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
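
/*
 * Descriptor accounting note (editor's summary of how the counters are
 * used in this file): sdma_descq_added and sdma_descq_removed are
 * free-running totals of descriptors queued and retired, so their
 * difference is the number currently outstanding, and the free count
 * returned by qib_sdma_descq_freecnt() is derived from it.  The resync
 * in sdma_sw_clean_up_task() sets removed equal to added, which keeps
 * "removed" monotonic as required above.
 */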

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
			    "FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA "
			    "head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{

	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
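
/*
 * Illustrative encoding example (values chosen only for this comment):
 * make_sdma_desc(ppd, desc, addr, 2, 0) with ppd->sdma_generation == 1
 * produces
 *   desc[0] = ((addr & 0xfffffffcULL) << 32) |
 *             (1ULL << SDMA_DESC_GEN_LSB) | (2ULL << SDMA_DESC_COUNT_LSB)
 *   desc[1] = addr >> 32
 * i.e. a two-dword transfer at buffer offset 0.  qib_sdma_verbs_send()
 * then ORs in SDMA_DESC_FIRST/SDMA_DESC_LAST and the other flag bits.
 */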

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequeued desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
					list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd))
		qib_sdma_make_progress(ppd);
}

int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		(unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/*
 * Complete a request when sdma is not running; it is likely the only
 * request, but to simplify the code we always queue it and then process
 * the full activelist.  Processing the entire list ensures that this
 * particular request gets its callback, and in the correct order.
 *
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	atomic_inc(&tx->qp->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}
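
/*
 * Shape of a queued request, as built by qib_sdma_verbs_send() below
 * (summary only): one descriptor for the already-mapped header at
 * tx->txreq.addr with SDMA_DESC_FIRST set, then one descriptor per
 * dma_map_single()'d chunk of the SGE list, with SDMA_DESC_LAST (and
 * optionally DMA_HEAD/INTR, depending on the txreq flags) OR'd into the
 * final descriptor before the hardware tail is updated and the txreq is
 * added to sdma_activelist.
 */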

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct qib_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct qib_sge *sge;
	struct qib_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
			goto unmap;
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	atomic_inc(&tx->qp->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* XXX what about error sending RDMA read responses? */
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
			qib_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		qp->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
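
/*
 * State-machine overview, reconstructed from the switch statement below
 * (state/event names as in qib_sdma_state_names[] and
 * qib_sdma_event_names[] above):
 *
 *   s00_HwDown        --e10/e30--->  s10_HwStartUpWait
 *   s10_HwStartUpWait --e20------->  s20_Idle or s99_Running
 *                                    (depending on go_s99_running)
 *   s20_Idle          --e30------->  s99_Running
 *   s99_Running       --e70/e7322->  s50_HwHaltWait
 *   s99_Running       --e60/e7220->  s30_SwCleanUpWait
 *   s50_HwHaltWait    --e60------->  s40_HwCleanUpWait
 *   s40_HwCleanUpWait --e50------->  s30_SwCleanUpWait
 *   s30_SwCleanUpWait --e40------->  s10_HwStartUpWait
 *   any state         --e00------->  s00_HwDown
 */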

void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the result
			 * of link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * 7220, e.g.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}