#include <linux/dma-buf.h>
#include <linux/dma-buf-mapping.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
static struct scatterlist *fill_sg_entry(struct scatterlist *sgl, size_t length,
dma_addr_t addr)
{
unsigned int len, nents;
int i;
nents = DIV_ROUND_UP(length, UINT_MAX);
for (i = 0; i < nents; i++) {
len = min_t(size_t, length, UINT_MAX);
length -= len;
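		/*
		 * This scatterlist carries DMA addresses only.  There are no
		 * CPU pages behind it, so keep the page fields NULL so that
		 * importers cannot use them.
		 */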
sg_set_page(sgl, NULL, 0, 0);
sg_dma_address(sgl) = addr + (dma_addr_t)i * UINT_MAX;
sg_dma_len(sgl) = len;
sgl = sg_next(sgl);
}
return sgl;
}
static unsigned int calc_sg_nents(struct dma_iova_state *state,
struct dma_buf_phys_vec *phys_vec,
size_t nr_ranges, size_t size)
{
unsigned int nents = 0;
size_t i;
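	/*
	 * Each entry holds at most UINT_MAX bytes because sg_dma_len() is an
	 * unsigned int.  Without a contiguous IOVA range every physical range
	 * needs its own entries.
	 */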
if (!state || !dma_use_iova(state)) {
for (i = 0; i < nr_ranges; i++)
nents += DIV_ROUND_UP(phys_vec[i].len, UINT_MAX);
} else {
nents = DIV_ROUND_UP(size, UINT_MAX);
}
return nents;
}
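
/* Mapping state kept behind the sg_table that is handed back to the caller */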
struct dma_buf_dma {
struct sg_table sgt;
struct dma_iova_state *state;
size_t size;
};
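
/**
 * dma_buf_phys_vec_to_sgt - DMA map an array of physical ranges to an sg_table
 * @attach: dma-buf attachment to map for
 * @provider: PCI P2PDMA provider that owns the physical ranges
 * @phys_vec: array of physical address ranges to map
 * @nr_ranges: number of entries in @phys_vec
 * @size: total length of all ranges in bytes
 * @dir: DMA direction
 *
 * Map MMIO ranges that have no struct pages for @attach->dev, either as PCI
 * bus addresses or through the DMA API, and return a DMA-only scatter-gather
 * table describing the result.  The caller must hold the dma-buf reservation
 * lock and release the table with dma_buf_free_sgt().
 *
 * Returns the sg_table on success or an ERR_PTR() on failure.
 */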
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
struct dma_buf_phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir)
{
	unsigned int nents;
	size_t mapped_len = 0;
struct dma_buf_dma *dma;
struct scatterlist *sgl;
dma_addr_t addr;
size_t i;
int ret;
	if (WARN_ON(!attach || !attach->dmabuf || !provider))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma)
return ERR_PTR(-ENOMEM);
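	/*
	 * Bus-address P2P transfers go directly between the PCI endpoints and
	 * need no host-side DMA mapping; transfers through the host bridge are
	 * mapped via the DMA API, preferably as one contiguous IOVA range.
	 */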
switch (pci_p2pdma_map_type(provider, attach->dev)) {
case PCI_P2PDMA_MAP_BUS_ADDR:
break;
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
dma->state = kzalloc(sizeof(*dma->state), GFP_KERNEL);
if (!dma->state) {
ret = -ENOMEM;
goto err_free_dma;
}
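		/*
		 * Failing to reserve an IOVA range is not fatal: when
		 * dma_use_iova() is false below, every range is mapped
		 * individually with dma_map_phys() instead.
		 */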
dma_iova_try_alloc(attach->dev, dma->state, 0, size);
break;
default:
ret = -EINVAL;
goto err_free_dma;
}
nents = calc_sg_nents(dma->state, phys_vec, nr_ranges, size);
ret = sg_alloc_table(&dma->sgt, nents, GFP_KERNEL | __GFP_ZERO);
if (ret)
goto err_free_state;
sgl = dma->sgt.sgl;
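	/*
	 * Each physical range becomes either a PCI bus address, a link into
	 * the reserved IOVA range, or an individually mapped DMA address.
	 */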
for (i = 0; i < nr_ranges; i++) {
if (!dma->state) {
addr = pci_p2pdma_bus_addr_map(provider,
phys_vec[i].paddr);
} else if (dma_use_iova(dma->state)) {
			ret = dma_iova_link(attach->dev, dma->state,
					    phys_vec[i].paddr, mapped_len,
					    phys_vec[i].len, dir,
					    DMA_ATTR_MMIO);
if (ret)
goto err_unmap_dma;
mapped_len += phys_vec[i].len;
} else {
addr = dma_map_phys(attach->dev, phys_vec[i].paddr,
phys_vec[i].len, dir,
DMA_ATTR_MMIO);
ret = dma_mapping_error(attach->dev, addr);
if (ret)
goto err_unmap_dma;
}
if (!dma->state || !dma_use_iova(dma->state))
sgl = fill_sg_entry(sgl, phys_vec[i].len, addr);
}
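	/*
	 * The IOVA path linked everything into one contiguous DMA range;
	 * sync it and emit it as a single run of scatterlist entries.
	 */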
if (dma->state && dma_use_iova(dma->state)) {
WARN_ON_ONCE(mapped_len != size);
ret = dma_iova_sync(attach->dev, dma->state, 0, mapped_len);
if (ret)
goto err_unmap_dma;
sgl = fill_sg_entry(sgl, mapped_len, dma->state->addr);
}
	dma->size = size;

	/*
	 * There is no CPU page list behind these entries, so clear orig_nents
	 * to keep importers from walking the table with the CPU iterators.
	 * dma_buf_free_sgt() restores it before calling sg_free_table().
	 */
	dma->sgt.orig_nents = 0;
	/* Every allocated entry must have been consumed by fill_sg_entry() */
	WARN_ON_ONCE(sgl);
return &dma->sgt;
err_unmap_dma:
	if (!dma->state) {
		/* Bus-address entries carry no host-side mapping to undo */;
	} else if (dma_use_iova(dma->state)) {
		if (mapped_len)
			dma_iova_unlink(attach->dev, dma->state, 0, mapped_len,
					dir, DMA_ATTR_MMIO);
	} else {
		for_each_sgtable_dma_sg(&dma->sgt, sgl, i) {
			/* Only entries filled before the failure are mapped */
			if (!sg_dma_len(sgl))
				break;
			dma_unmap_phys(attach->dev, sg_dma_address(sgl),
				       sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
		}
	}
	sg_free_table(&dma->sgt);
err_free_state:
	if (dma->state && dma_use_iova(dma->state))
		dma_iova_free(attach->dev, dma->state);
	kfree(dma->state);
err_free_dma:
kfree(dma);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_phys_vec_to_sgt, "DMA_BUF");
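
/**
 * dma_buf_free_sgt - undo dma_buf_phys_vec_to_sgt()
 * @attach: dma-buf attachment the table was mapped for
 * @sgt: sg_table returned by dma_buf_phys_vec_to_sgt()
 * @dir: DMA direction the table was mapped with
 *
 * Unmaps the DMA addresses and frees the table.  The caller must hold the
 * dma-buf reservation lock.
 */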
void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
enum dma_data_direction dir)
{
struct dma_buf_dma *dma = container_of(sgt, struct dma_buf_dma, sgt);
int i;
dma_resv_assert_held(attach->dmabuf->resv);
if (!dma->state) {
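		/* Bus-address P2P entries carry no host-side mapping to undo */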
;
} else if (dma_use_iova(dma->state)) {
dma_iova_destroy(attach->dev, dma->state, dma->size, dir,
DMA_ATTR_MMIO);
} else {
struct scatterlist *sgl;
for_each_sgtable_dma_sg(sgt, sgl, i)
dma_unmap_phys(attach->dev, sg_dma_address(sgl),
sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
}
	/* sg_free_table() frees by orig_nents, which was cleared on map */
	sgt->orig_nents = sgt->nents;
	sg_free_table(sgt);
kfree(dma->state);
kfree(dma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_free_sgt, "DMA_BUF");