GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

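/*
 * Illustrative sketch, not part of the original file: assuming this tree
 * provides the set_dma_offset() helper in <asm/dma-mapping.h>, platform
 * setup code could install a non-zero bus offset roughly like this, where
 * MY_PLATFORM_DMA_OFFSET is a hypothetical platform-specific constant:
 *
 *	set_dma_offset(&pdev->dev, MY_PLATFORM_DMA_OFFSET);
 *
 * get_dma_offset(dev) then returns that value, and the callbacks below add
 * it to every bus address they hand back to drivers.
 */

/*
 * Allocate a coherent buffer, returning its kernel virtual address and
 * storing the device-visible bus address (physical address plus the
 * per-device offset) in *dma_handle. On non-cache-coherent platforms the
 * allocation itself is delegated to __dma_alloc_coherent() and only the
 * offset is applied here.
 */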
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}

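/*
 * Counterpart of dma_direct_alloc_coherent(): hand the buffer back to
 * __dma_free_coherent() on non-cache-coherent platforms, or straight back
 * to the page allocator otherwise.
 */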
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

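/*
 * Map a scatterlist for DMA: each entry's bus address is its physical
 * address plus the per-device offset, and each page is cache-synced for
 * the requested direction.
 */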
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

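/* Nothing to undo for a direct scatterlist mapping. */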
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

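/*
 * A device can use direct DMA only if its mask covers the highest bus
 * address it may see: the end of DRAM plus the per-device offset.
 */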
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

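/*
 * Map a single page: sync it for the requested direction and return its
 * physical address plus the offset into the page plus the per-device
 * offset.
 */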
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

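/* Nothing to undo for a direct page mapping. */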
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

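/*
 * On platforms without coherent caches the sync hooks perform the CPU
 * cache maintenance needed around DMA; on coherent platforms they are not
 * installed in dma_direct_ops at all.
 */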
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

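/* The dma_map_ops table used for directly mapped busses. */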
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu = dma_direct_sync_single,
	.sync_single_for_device = dma_direct_sync_single,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

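/*
 * Set a device's DMA mask, trying the machine-specific hook first, then
 * the bus dma_map_ops hook, and finally the generic dma_supported() check.
 */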
int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);

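/*
 * Map a coherent buffer into user space. On non-cache-coherent platforms
 * the user mapping is made non-cached and the pfn is recovered from the
 * coherent mapping via __dma_get_coherent_pfn(); otherwise the buffer is
 * ordinary lowmem.
 */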
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);