Path: drivers/infiniband/hw/ipath/ipath_sdma.c
/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */

static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
		unsigned long interval = (HZ + 19) / 20;
		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
		add_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
		unsigned long interval = (HZ + 19) / 20;
		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
	} else {
		del_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_timeout(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
		ipath_dbg("vl15 watchdog timeout - clearing\n");
		ipath_cancel_sends(dd, 1);
		ipath_hol_down(dd);
	} else {
		ipath_dbg("vl15 watchdog timeout - "
			"condition already cleared\n");
	}
}
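
/*
 * Undo the DMA mapping for the descriptor at ring index "head".
 * The bus address is rebuilt from the two descriptor qwords and the
 * SDmaDwordCount field is converted back to a byte length; see
 * make_sdma_desc() below for the field layout.
 */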
static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

/*
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
	struct list_head *lp = NULL;
	struct ipath_sdma_txreq *txp = NULL;
	u16 dmahead;
	u16 start_idx = 0;
	int progress = 0;

	if (!list_empty(&dd->ipath_sdma_activelist)) {
		lp = dd->ipath_sdma_activelist.next;
		txp = list_entry(lp, struct ipath_sdma_txreq, list);
		start_idx = txp->start_idx;
	}

	/*
	 * Read the SDMA head register in order to know that the
	 * interrupt clear has been written to the chip.
	 * Otherwise, we may not get an interrupt for the last
	 * descriptor in the queue.
	 */
	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
	/* sanity check return value for error handling (chip reset, etc.) */
	if (dmahead >= dd->ipath_sdma_descq_cnt)
		goto done;

	while (dd->ipath_sdma_descq_head != dmahead) {
		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
		    dd->ipath_sdma_descq_head == start_idx) {
			unmap_desc(dd, dd->ipath_sdma_descq_head);
			start_idx++;
			if (start_idx == dd->ipath_sdma_descq_cnt)
				start_idx = 0;
		}

		/* increment free count and head */
		dd->ipath_sdma_descq_removed++;
		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
			dd->ipath_sdma_descq_head = 0;

		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
			/* move to notify list */
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(lp, &dd->ipath_sdma_notifylist);
			if (!list_empty(&dd->ipath_sdma_activelist)) {
				lp = dd->ipath_sdma_activelist.next;
				txp = list_entry(lp, struct ipath_sdma_txreq,
						 list);
				start_idx = txp->start_idx;
			} else {
				lp = NULL;
				txp = NULL;
			}
		}
		progress = 1;
	}

	if (progress)
		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
	return progress;
}

static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
	struct ipath_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, list, list) {
		list_del_init(&txp->list);

		if (txp->callback)
			(*txp->callback)(txp->callback_cookie,
					 txp->callback_status);
	}
}

static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
	unsigned long flags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	list_splice_init(&dd->ipath_sdma_notifylist, &list);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	ipath_sdma_notify(dd, &list);

	/*
	 * The IB verbs layer needs to see the callback before getting
	 * the call to ipath_ib_piobufavail() because the callback
	 * handles releasing resources the next send will need.
	 * Otherwise, we could do these calls in
	 * ipath_sdma_make_progress().
	 */
	ipath_ib_piobufavail(dd->verbs_dev);
}

static void sdma_notify_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		sdma_notify_taskbody(dd);
}

static void dump_sdma_state(struct ipath_devdata *dd)
{
	unsigned long reg;

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}
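
/*
 * Tasklet run while an SDMA abort is in progress: wait (rescheduling
 * itself a bounded number of times) for the engine to quiesce, retire
 * any queued requests with an ABORTED status, reset the software view
 * of the ring, and restart SDMA if the link is still ACTIVE.
 */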
static void sdma_abort_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u64 status;
	unsigned long flags;

	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		return;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

	/* nothing to do */
	if (status == IPATH_SDMA_ABORT_NONE)
		goto unlock;

	/* ipath_sdma_abort() is done, waiting for interrupt */
	if (status == IPATH_SDMA_ABORT_DISARMED) {
		if (jiffies < dd->ipath_sdma_abort_intr_timeout)
			goto resched_noprint;
		/* give up, intr got lost somewhere */
		ipath_dbg("give up waiting for SDMADISABLED intr\n");
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		status = IPATH_SDMA_ABORT_ABORTED;
	}

	/* everything is stopped, time to clean up and restart */
	if (status == IPATH_SDMA_ABORT_ABORTED) {
		struct ipath_sdma_txreq *txp, *txpnext;
		u64 hwstatus;
		int notify = 0;

		hwstatus = ipath_read_kreg64(dd,
				dd->ipath_kregs->kr_senddmastatus);

		if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
				 IPATH_SDMA_STATUS_ABORT_IN_PROG |
				 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
		    !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
			if (dd->ipath_sdma_reset_wait > 0) {
				/* not done shutting down sdma */
				--dd->ipath_sdma_reset_wait;
				goto resched;
			}
			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
				"status after SDMA reset, continuing\n");
			dump_sdma_state(dd);
		}

		/* dequeue all "sent" requests */
		list_for_each_entry_safe(txp, txpnext,
					 &dd->ipath_sdma_activelist, list) {
			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
			notify = 1;
		}
		if (notify)
			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

		/* reset our notion of head and tail */
		dd->ipath_sdma_descq_tail = 0;
		dd->ipath_sdma_descq_head = 0;
		dd->ipath_sdma_head_dma[0] = 0;
		dd->ipath_sdma_generation = 0;
		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

		/* Reset SendDmaLenGen */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

		/* done with sdma state for a bit */
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

		/*
		 * Don't restart sdma here (with the exception
		 * below). Wait until link is up to ACTIVE.  VL15 MADs
		 * used to bring the link up use PIO, and multiple link
		 * transitions otherwise cause the sdma engine to be
		 * stopped and started multiple times.
		 * The disable is done here, including the shadow,
		 * so the state is kept consistent.
		 * See ipath_restart_sdma() for the actual starting
		 * of sdma.
		 */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

		/* make sure I see next message */
		dd->ipath_sdma_abort_jiffies = 0;

		/*
		 * Not everything that takes SDMA offline is a link
		 * status change.  If the link was up, restart SDMA.
		 */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			ipath_restart_sdma(dd);

		goto done;
	}

resched:
	/*
	 * for now, keep spinning
	 * JAG - this is bad to just have default be a loop without
	 * state change
	 */
	if (jiffies > dd->ipath_sdma_abort_jiffies) {
		ipath_dbg("looping with status 0x%08lx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
resched_noprint:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	return;

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
done:
	return;
}

/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	(void) ipath_sdma_make_progress(dd);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}

static int alloc_sdma(struct ipath_devdata *dd)
{
	int ret = 0;

	/* Allocate memory for SendDMA descriptor FIFO */
	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
			"FIFO memory\n");
		ret = -ENOMEM;
		goto done;
	}

	dd->ipath_sdma_descq_cnt =
		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

	/* Allocate memory for DMA of head register to memory */
	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
	if (!dd->ipath_sdma_head_dma) {
		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
		ret = -ENOMEM;
		goto cleanup_descq;
	}
	dd->ipath_sdma_head_dma[0] = 0;

	init_timer(&dd->ipath_sdma_vl15_timer);
	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
	atomic_set(&dd->ipath_sdma_vl15_count, 0);

	goto done;

cleanup_descq:
	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
	dd->ipath_sdma_descq = NULL;
	dd->ipath_sdma_descq_phys = 0;
done:
	return ret;
}
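
/*
 * One-time SDMA setup: allocate the descriptor ring and head-shadow
 * page, program the SendDma* chip registers, hand the former kernel
 * PIO buffers over to SDMA, and enable the SDMA interrupt.  The
 * engine itself is only enabled later, by ipath_restart_sdma() on
 * the transition to link ACTIVE.
 */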
int setup_sdma(struct ipath_devdata *dd)
{
	int ret = 0;
	unsigned i, n;
	u64 tmp64;
	u64 senddmabufmask[3] = { 0 };
	unsigned long flags;

	ret = alloc_sdma(dd);
	if (ret)
		goto done;

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "SendDMA memory not allocated\n");
		goto done;
	}

	/*
	 * Set initial status as if we had been up, then gone down.
	 * This lets initial start on transition to ACTIVE be the
	 * same as restart after link flap.
	 */
	dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
	dd->ipath_sdma_abort_jiffies = 0;
	dd->ipath_sdma_generation = 0;
	dd->ipath_sdma_descq_tail = 0;
	dd->ipath_sdma_descq_head = 0;
	dd->ipath_sdma_descq_removed = 0;
	dd->ipath_sdma_descq_added = 0;

	/* Set SendDmaBase */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
			 dd->ipath_sdma_descq_phys);
	/* Set SendDmaLenGen */
	tmp64 = dd->ipath_sdma_descq_cnt;
	tmp64 |= 1<<18; /* enable generation checking */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
	/* Set SendDmaTail */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
			 dd->ipath_sdma_descq_tail);
	/* Set SendDmaHeadAddr */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
			 dd->ipath_sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible
	 */
	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
	ipath_chg_pioavailkernel(dd, i, n - i, 0);
	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;
		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
			 senddmabufmask[0]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
			 senddmabufmask[1]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
			 senddmabufmask[2]);

	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
		     (unsigned long) dd);
	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
		     (unsigned long) dd);

	/*
	 * No use to turn on SDMA here, as link is probably not ACTIVE
	 * Just mark it RUNNING and enable the interrupt, and let the
	 * ipath_restart_sdma() on link transition to ACTIVE actually
	 * enable it.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
	return ret;
}
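
/*
 * The reverse of setup_sdma(): mark SDMA shut down, kill the tasklets,
 * disable the engine, complete any still-queued requests with a
 * SHUTDOWN status, clear the SendDma* registers, and free the
 * DMA-coherent memory.
 */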
void teardown_sdma(struct ipath_devdata *dd)
{
	struct ipath_sdma_txreq *txp, *txpnext;
	unsigned long flags;
	dma_addr_t sdma_head_phys = 0;
	dma_addr_t sdma_descq_phys = 0;
	void *sdma_descq = NULL;
	void *sdma_head_dma = NULL;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	tasklet_kill(&dd->ipath_sdma_abort_task);
	tasklet_kill(&dd->ipath_sdma_notify_task);

	/* turn off sdma */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	/* dequeue all "sent" requests */
	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
				 list) {
		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
			vl15_watchdog_deq(dd);
		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	sdma_notify_taskbody(dd);

	del_timer_sync(&dd->ipath_sdma_vl15_timer);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	dd->ipath_sdma_abort_jiffies = 0;

	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

	if (dd->ipath_sdma_head_dma) {
		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
		sdma_head_phys = dd->ipath_sdma_head_phys;
		dd->ipath_sdma_head_dma = NULL;
		dd->ipath_sdma_head_phys = 0;
	}

	if (dd->ipath_sdma_descq) {
		sdma_descq = dd->ipath_sdma_descq;
		sdma_descq_phys = dd->ipath_sdma_descq_phys;
		dd->ipath_sdma_descq = NULL;
		dd->ipath_sdma_descq_phys = 0;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	if (sdma_head_dma)
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  sdma_head_dma, sdma_head_phys);

	if (sdma_descq)
		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
				  sdma_descq, sdma_descq_phys);
}

/*
 * [Re]start SDMA, if we use it, and it's not already OK.
 * This is called on transition to link ACTIVE, either the first or
 * subsequent times.
 */
void ipath_restart_sdma(struct ipath_devdata *dd)
{
	unsigned long flags;
	int needed = 1;

	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		goto bail;

	/*
	 * First, make sure we should, which is to say,
	 * check that we are "RUNNING" (not in teardown)
	 * and not "SHUTDOWN"
	 */
	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
		|| test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		needed = 0;
	else {
		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!needed) {
		ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
			dd->ipath_sdma_status);
		goto bail;
	}
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	/*
	 * First clear, just to be safe. Enable is only done
	 * in chip on 0->1 transition
	 */
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/* notify upper layers */
	ipath_ib_piobufavail(dd->verbs_dev);

bail:
	return;
}
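
/*
 * Build the two 64-bit words of a hardware SDMA descriptor from a
 * DMA address, a length in dwords, and the dword offset into the
 * send buffer.  The first/last/use-large-buffer/interrupt-request
 * flag bits are ORed in by the caller.
 */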
static inline void make_sdma_desc(struct ipath_devdata *dd,
	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
	struct ipath_sge_state *ss, u32 dwords,
	struct ipath_verbs_txreq *tx)
{

	unsigned long flags;
	struct ipath_sge *sge;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
		ipath_dbg("packet size %X > ibmax %X, fail\n",
			tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
		ret = -EBUSY;
		goto unlock;
	}

	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
		if (ipath_sdma_make_progress(dd))
			goto retry;
		ret = -ENOBUFS;
		goto unlock;
	}

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, addr))
		goto ioerr;

	dwoffset = tx->map_len >> 2;
	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

	/* SDmaFirstDesc */
	sdmadesc[0] |= 1ULL << 12;
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */

	/* write to the descq */
	tail = dd->ipath_sdma_descq_tail;
	descqp = &dd->ipath_sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
		tx->txreq.start_idx = tail;

	/* increment the tail */
	if (++tail == dd->ipath_sdma_descq_cnt) {
		tail = 0;
		descqp = &dd->ipath_sdma_descq[0].qw[0];
		++dd->ipath_sdma_generation;
	}

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pcidev->dev, addr))
			goto unmap;
		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= 1ULL << 14;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == dd->ipath_sdma_descq_cnt) {
			tail = 0;
			descqp = &dd->ipath_sdma_descq[0].qw[0];
			++dd->ipath_sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
	descqp -= 2;
	/* SDmaLastDesc */
	descqp[0] |= cpu_to_le64(1ULL << 11);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
		/* SDmaIntReq */
		descqp[0] |= cpu_to_le64(1ULL << 15);
	}

	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

	tx->txreq.next_descq_idx = tail;
	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
	dd->ipath_sdma_descq_tail = tail;
	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
		vl15_watchdog_enq(dd);
	goto unlock;

unmap:
	while (tail != dd->ipath_sdma_descq_tail) {
		if (!tail)
			tail = dd->ipath_sdma_descq_cnt - 1;
		else
			tail--;
		unmap_desc(dd, tail);
	}
ioerr:
	ret = -EIO;
unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
	return ret;
}