Path: drivers/infiniband/hw/ipath/ipath_user_sdma.c
/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "ipath_kernel.h"
#include "ipath_user_sdma.h"

/* minimum size of header */
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH	64
/* expected size of headers (for dma_pool) */
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH	64
/* length mask in PBC (lower 11 bits) */
#define IPATH_PBC_LENGTH_MASK			((1 << 11) - 1)

struct ipath_user_sdma_pkt {
	u8 naddr;		/* dimension of addr (1..3) ... */
	u32 counter;		/* sdma pkts queued counter for this entry */
	u64 added;		/* global descq number of entries */

	struct {
		u32 offset;		/* offset for kvaddr, addr */
		u32 length;		/* length in page */
		u8 put_page;		/* should we put_page? */
		u8 dma_mapped;		/* is page dma_mapped? */
		struct page *page;	/* may be NULL (coherent mem) */
		void *kvaddr;		/* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];			/* max pages, any more and we coalesce */
	struct list_head list;		/* list element */
};

struct ipath_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct ipath_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};
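
/*
 * Sizing note (illustrative): a packet is described by at most
 * ARRAY_SIZE(addr) == 4 fragments -- one for the header plus up to
 * three pinned payload pages.  A payload that would need more page
 * fragments than that is instead copied ("coalesced") into a single
 * freshly allocated page, so naddr never exceeds 4.
 */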

struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	struct ipath_user_sdma_queue *pq =
		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct ipath_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}

static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
				      int i, size_t offset, size_t len,
				      int put_page, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
					u32 counter, size_t offset,
					size_t len, int dma_mapped,
					struct page *page,
					void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				  kvaddr, dma_addr);
}

/* we've too many pages in the iovec, coalesce to a single page */
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
				    struct ipath_user_sdma_pkt *pkt,
				    const struct iovec *iov,
				    unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				  dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

/* how many pages in this iovec element? */
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long) iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/* truncate length to page boundary */
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}
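
/*
 * Worked example for the two helpers above (with 4 KiB pages): an iovec
 * element with iov_base = 0x1ff0 and iov_len = 0x30 starts 0xff0 bytes
 * into its first page, so ipath_user_sdma_num_pages() returns 2, and
 * ipath_user_sdma_page_length(0x1ff0, 0x30) truncates the first fragment
 * to PAGE_SIZE - 0xff0 = 0x10 bytes, leaving 0x20 bytes for the
 * following page.
 */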

static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}

static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   struct ipath_user_sdma_pkt *pkt,
				   const struct iovec *iov,
				   unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = ipath_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = ipath_user_sdma_pin_pages(dd, pkt,
						addr, iov[idx].iov_len,
						npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}

static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
					struct ipath_user_sdma_queue *pq,
					struct ipath_user_sdma_pkt *pkt,
					const struct iovec *iov,
					unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}
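
/*
 * To make the choice above concrete (illustrative numbers, 4 KiB pages):
 * five 512-byte payload iovec elements that each straddle a page
 * boundary count as 10 pages even though they total only 2560 bytes, so
 * they are coalesced into one freshly allocated page at the cost of an
 * extra copy_from_user().  Two page-aligned 2048-byte elements count as
 * 2 pages and are pinned and DMA-mapped in place instead.
 */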

/* free a packet list -- return counter value of last packet */
static void ipath_user_sdma_free_pkt_list(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct list_head *list)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the
 * number of bytes total.  list must be empty initially,
 * as, if there is an error we clean it...
 */
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
				      struct ipath_user_sdma_queue *pq,
				      struct list_head *list,
				      const struct iovec *iov,
				      unsigned long niov,
				      int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct ipath_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * this assignment is a bit strange.  it's because
		 * the pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}
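
		/*
		 * Worked example (illustrative): a 64-byte header is 16
		 * words, so pktnwc starts at 15.  If the packet carries a
		 * single 2048-byte payload fragment (512 words), the PBC
		 * length field is expected to read 15 + 512 = 527; the
		 * loop below recomputes that sum from the iovec lengths
		 * to cross-check it.
		 */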

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					    page, pbc, dma_addr);

		if (nfrags) {
			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
							   iov + idx_save + 1,
							   nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
						 u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
				       struct ipath_user_sdma_queue *pq)
{
	struct list_head free_list;
	struct ipath_user_sdma_pkt *pkt;
	struct ipath_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = dd->ipath_sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct ipath_user_sdma_pkt, list);
		counter = pkt->counter;

		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		ipath_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	ret = ipath_sdma_make_progress(dd);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
				 struct ipath_user_sdma_queue *pq)
{
	int i;

	if (!pq)
		return;

	for (i = 0; i < 100; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		printk(KERN_INFO "drain: lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
					   u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
	/* last */ /* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}

static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
				      struct ipath_user_sdma_pkt *pkt, int idx,
				      unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &dd->ipath_sdma_descq[tail].qw[0];

	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = ipath_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = ipath_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = ipath_sdma_make_desc1(addr);
}
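
/*
 * Illustrative packing of the first descriptor qword: for a fragment at
 * bus address 0x12345678, 16 dwords long, at dword offset 0 within the
 * packet, with SDMA generation 1, ipath_sdma_make_desc0() yields
 *
 *	(0x12345678ULL << 32) | (1ULL << 30) | (16ULL << 16) | 0
 *
 * and the helpers above (plus ipath_user_sdma_push_pkts() below) OR in
 * bit 12 for the first descriptor of a packet, bits 11 and 13 for the
 * last one, and bit 14 when the packet needs a large send buffer.
 */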

/* pq->lock must be held, get packets on the wire... */
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
				     struct ipath_user_sdma_queue *pq,
				     struct list_head *pktlist)
{
	int ret = 0;
	unsigned long flags;
	u16 tail;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = dd->ipath_sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct ipath_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == dd->ipath_sdma_descq_cnt) {
				tail = 0;
				++dd->ipath_sdma_generation;
			}
		}

		if ((ofs << 2) > dd->ipath_ibmaxlen) {
			ipath_dbg("packet size %X > ibmax %X, fail\n",
				  ofs << 2, dd->ipath_ibmaxlen);
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * if the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs >= IPATH_SMALLBUF_DWORDS) {
			for (i = 0; i < pkt->naddr; i++) {
				dd->ipath_sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == dd->ipath_sdma_descq_cnt)
					dtail = 0;
			}
		}

		dd->ipath_sdma_descq_added += pkt->naddr;
		pkt->added = dd->ipath_sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (dd->ipath_sdma_descq_tail != tail) {
		wmb();
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
		dd->ipath_sdma_descq_tail = tail;
	}

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}
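
/*
 * Shape of the vector handed to ipath_user_sdma_writev() (illustrative):
 * each packet contributes one header iovec -- at least
 * IPATH_USER_SDMA_MIN_HEADER_LENGTH bytes, 4-byte aligned, beginning
 * with the PBC word -- followed by zero or more payload iovecs whose
 * 32-bit word counts add up to the length announced in that PBC.  Up to
 * eight packets (mxp below) are parsed and pushed per loop iteration.
 */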

int ipath_user_sdma_writev(struct ipath_devdata *dd,
			   struct ipath_user_sdma_queue *pq,
			   const struct iovec *iov,
			   unsigned long dim)
{
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
	}

	while (dim) {
		const int mxp = 8;

		down_write(&current->mm->mmap_sem);
		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		up_write(&current->mm->mmap_sem);

		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
				ipath_user_sdma_hwqueue_clean(dd);
				ipath_user_sdma_queue_clean(dd, pq);
			}

			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
				  struct ipath_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	ipath_user_sdma_hwqueue_clean(dd);
	ret = ipath_user_sdma_queue_clean(dd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
{
	return pq->sent_counter;
}

u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
{
	return pq->counter;
}