Path: blob/master/drivers/infiniband/hw/qib/qib_mr.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;	/* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;

bail:
	return ret;
}

static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;
	mr->mr.page_shift = 0;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;

	atomic_set(&mr->mr.refcount, 0);
	goto done;

bail:
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}
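/*
 * Worked illustration of the two-level table that alloc_mr() builds.
 * The QIB_SEGSZ value below is hypothetical and chosen only for the
 * arithmetic; the real constant comes from the driver headers pulled in
 * by qib.h.
 *
 *	count = 300 segments, QIB_SEGSZ = 128  ->  m = (300 + 127) / 128 = 3
 *
 *	mr->mr.map[0]->segs[0..127]	holds segments   0..127
 *	mr->mr.map[1]->segs[0..127]	holds segments 128..255
 *	mr->mr.map[2]->segs[0..43]	holds segments 256..299
 *
 * Segment i therefore lives at map[i / QIB_SEGSZ]->segs[i % QIB_SEGSZ],
 * which matches the m/n walking pattern used by the registration loops
 * below.
 */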
/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int ret;
	int i;

	ret = qib_free_lkey(dev, &mr->mr);
	if (ret)
		return ret;

	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}

struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof *pl, GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
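/*
 * For reference, the FMR entry points below are normally driven through
 * the core FMR API (often via ib_fmr_pool) in this order:
 *
 *	ib_alloc_fmr()    -> qib_alloc_fmr()      allocate lkey/rkey, no mapping
 *	ib_map_phys_fmr() -> qib_map_phys_fmr()   bind a page list to the keys
 *	ib_unmap_fmr()    -> qib_unmap_fmr()      invalidate a batch of mappings
 *	ib_dealloc_fmr()  -> qib_dealloc_fmr()    free the keys and tables
 */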
/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret;
	int i;

	ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (ret)
		return ret;

	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}
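For context, nothing in this file is called directly by upper-layer consumers: the routines above are installed as memory-registration verbs on the qib device's struct ib_device when it registers with the RDMA core. The sketch below is modeled on qib_register_ib_device() in qib_verbs.c and uses the ib_device method pointers of this kernel generation; the helper name is made up for illustration, and the real driver performs these assignments inline alongside the rest of its verbs table.

static void qib_install_mr_verbs(struct ib_device *ibdev)
{
	/*
	 * Illustrative only: the in-tree driver assigns these function
	 * pointers inline in qib_register_ib_device() rather than through
	 * a helper like this one.
	 */
	ibdev->get_dma_mr = qib_get_dma_mr;
	ibdev->reg_phys_mr = qib_reg_phys_mr;
	ibdev->reg_user_mr = qib_reg_user_mr;
	ibdev->dereg_mr = qib_dereg_mr;
	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
	ibdev->alloc_fmr = qib_alloc_fmr;
	ibdev->map_phys_fmr = qib_map_phys_fmr;
	ibdev->unmap_fmr = qib_unmap_fmr;
	ibdev->dealloc_fmr = qib_dealloc_fmr;
}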