Path: blob/master/drivers/infiniband/hw/qib/qib_dma.c
/*
 * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/scatterlist.h>

#include "qib_verbs.h"

#define BAD_DMA_ADDRESS ((u64) 0)

/*
 * The following functions implement driver specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */

static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == BAD_DMA_ADDRESS;
}

static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
			      size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return (u64) cpu_addr;
}

static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size > PAGE_SIZE) {
		addr = BAD_DMA_ADDRESS;
		goto done;
	}

	addr = (u64) page_address(page);
	if (addr)
		addr += offset;
	/* TODO: handle highmem pages */

done:
	return addr;
}

static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
			       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64) page_address(sg_page(sg));
		/* TODO: handle highmem pages */
		if (!addr) {
			ret = 0;
			break;
		}
	}
	return ret;
}

static void qib_unmap_sg(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	u64 addr = (u64) page_address(sg_page(sg));

	if (addr)
		addr += sg->offset;
	return addr;
}

static unsigned int qib_sg_dma_len(struct ib_device *dev,
				   struct scatterlist *sg)
{
	return sg->length;
}

static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				    size_t size, enum dma_data_direction dir)
{
}

static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
				       size_t size,
				       enum dma_data_direction dir)
{
}

static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
				    u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;

	p = alloc_pages(flag, get_order(size));
	if (p)
		addr = page_address(p);
	if (dma_handle)
		*dma_handle = (u64) addr;
	return addr;
}

static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
				  void *cpu_addr, u64 dma_handle)
{
	free_pages((unsigned long) cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops qib_dma_mapping_ops = {
	.mapping_error = qib_mapping_error,
	.map_single = qib_dma_map_single,
	.unmap_single = qib_dma_unmap_single,
	.map_page = qib_dma_map_page,
	.unmap_page = qib_dma_unmap_page,
	.map_sg = qib_map_sg,
	.unmap_sg = qib_unmap_sg,
	.dma_address = qib_sg_dma_address,
	.dma_len = qib_sg_dma_len,
	.sync_single_for_cpu = qib_sync_single_for_cpu,
	.sync_single_for_device = qib_sync_single_for_device,
	.alloc_coherent = qib_dma_alloc_coherent,
	.free_coherent = qib_dma_free_coherent
};
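For context on how this table is consumed: the driver installs it on the device at registration time (qib_register_ib_device() in qib_verbs.c sets the device's dma_ops to &qib_dma_mapping_ops), and the generic ib_dma_*() wrappers dispatch through it whenever it is non-NULL, falling back to the real DMA API otherwise. Below is a minimal sketch of that dispatch path, modeled on the ib_dma_map_single() wrapper from include/rdma/ib_verbs.h of the same kernel era; it is illustration only, not part of this file.

/*
 * Sketch of the consumer side (assumed shape of the ib_verbs.h wrapper):
 * if the device provides its own mapping ops, use them; otherwise fall
 * back to the normal streaming-DMA mapping.
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

Because qib moves data with the CPU (programmed I/O) rather than real hardware DMA, map_single can simply return the kernel virtual address, the sync_single_for_cpu/device hooks are no-ops, and BAD_DMA_ADDRESS (0) works as the error sentinel since a valid kernel virtual address is never 0.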