Path: blob/master/drivers/infiniband/hw/qib/qib_user_sdma.c
/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500

struct qib_user_sdma_pkt {
	u8 naddr;		/* dimension of addr (1..3) ... */
	u32 counter;		/* sdma pkts queued counter for this entry */
	u64 added;		/* global descq number of entries */

	struct {
		u32 offset;		/* offset for kvaddr, addr */
		u32 length;		/* length in page */
		u8 put_page;		/* should we put_page? */
		u8 dma_mapped;		/* is page dma_mapped? */
		struct page *page;	/* may be NULL (coherent mem) */
		void *kvaddr;		/* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];		/* max pages, any more and we coalesce */

	struct list_head list;	/* list element */
};

struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};

struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}

static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, size_t offset, size_t len,
				    int put_page, int dma_mapped,
				    struct page *page,
				    void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
				      u32 counter, size_t offset,
				      size_t len, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				kvaddr, dma_addr);
}

/* we've too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long) iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/*
 * Truncate length to page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ?
		(PAGE_SIZE - offset) : len;
}
static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen = qib_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					pages[j], kmap(pages[j]), dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}

static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}

static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}

/* free a packet list -- return counter value of last packet */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
	INIT_LIST_HEAD(list);
}
/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the
 * number of bytes total.  list must be empty initially,
 * as, if there is an error we clean it...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_user_sdma_queue *pq,
				    struct list_head *list,
				    const struct iovec *iov,
				    unsigned long niov,
				    int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * This assignment is a bit strange.  it's because
		 * the pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					  page, pbc, dma_addr);

		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	int i;

	if (!pq)
		return;

	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
					 u64 addr, u64 dwlen, u64 dwoffset)
{
	u8 tmpgen;

	tmpgen = ppd->sdma_generation;

	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((tmpgen & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
	/* last */		/* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}

static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
				    struct qib_user_sdma_pkt *pkt, int idx,
				    unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &ppd->sdma_descq[tail].qw[0];

	descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = qib_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = qib_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = qib_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
				   struct qib_user_sdma_queue *pq,
				   struct list_head *pktlist)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	unsigned long flags;
	u16 tail;
	u8 generation;
	u64 descq_added;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/* keep a copy for restoring purposes in case of problems */
	generation = ppd->sdma_generation;
	descq_added = ppd->sdma_descq_added;

	if (unlikely(!__qib_sdma_running(ppd))) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = ppd->sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct qib_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct qib_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == ppd->sdma_descq_cnt) {
				tail = 0;
				++ppd->sdma_generation;
			}
		}

		if ((ofs << 2) > ppd->ibmaxlen) {
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * If the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs > dd->piosize2kmax_dwords) {
			for (i = 0; i < pkt->naddr; i++) {
				ppd->sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == ppd->sdma_descq_cnt)
					dtail = 0;
			}
		}

		ppd->sdma_descq_added += pkt->naddr;
		pkt->added = ppd->sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (ppd->sdma_descq_tail != tail)
		dd->f_sdma_update_tail(ppd, tail);

unlock:
	if (unlikely(ret < 0)) {
		ppd->sdma_generation = generation;
		ppd->sdma_descq_added = descq_added;
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
			 struct qib_user_sdma_queue *pq,
			 const struct iovec *iov,
			 unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
	if (!qib_sdma_running(ppd))
		goto done_unlock;

	if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
	}

	while (dim) {
		const int mxp = 8;

		down_write(&current->mm->mmap_sem);
		ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		up_write(&current->mm->mmap_sem);

		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * Lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
				qib_user_sdma_hwqueue_clean(ppd);
				qib_user_sdma_queue_clean(ppd, pq);
			}

			ret = qib_user_sdma_push_pkts(ppd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	qib_user_sdma_hwqueue_clean(ppd);
	ret = qib_user_sdma_queue_clean(ppd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	return pq ? pq->counter : 0;
}
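For reference only, and not part of the driver source above: a minimal standalone sketch of the descriptor packing performed by qib_sdma_make_desc0(), with the bit positions copied from that function's in-line comments. It computes the host-order value before the driver's cpu_to_le64() conversion; the helper name and the values passed in main() are arbitrary illustrations.

#include <stdint.h>
#include <stdio.h>

/*
 * Pack SDmaPhyAddr[31:0], generation, dword count and buffer offset
 * the same way qib_sdma_make_desc0() does (host byte order).
 */
static uint64_t make_desc0(uint64_t addr, uint64_t dwlen,
			   uint64_t dwoffset, uint8_t gen)
{
	return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0] */
	       ((gen & 3ULL) << 30) |		/* SDmaGeneration[1:0] */
	       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
	       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2] */
}

int main(void)
{
	/* e.g. a 64-byte (16-dword) header fragment at DMA address 0x12345000 */
	printf("desc0 = 0x%016llx\n",
	       (unsigned long long)make_desc0(0x12345000ULL, 16, 0, 1));
	return 0;
}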