GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma-buf/heaps/cma_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <[email protected]> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <[email protected]>
 */

#define pr_fmt(fmt) "cma_heap: " fmt

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DEFAULT_CMA_NAME "default_cma_region"

struct cma_heap {
        struct dma_heap *heap;
        struct cma *cma;
};

struct cma_heap_buffer {
        struct cma_heap *heap;
        struct list_head attachments;
        struct mutex lock;
        unsigned long len;
        struct page *cma_pages;
        struct page **pages;
        pgoff_t pagecount;
        int vmap_cnt;
        void *vaddr;
};

struct dma_heap_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
        bool mapped;
};

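/*
 * Each device attachment gets its own scatterlist built over the buffer's
 * page array, so it can be DMA-mapped and unmapped independently of any
 * other attachment to the same buffer.
 */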
static int cma_heap_attach(struct dma_buf *dmabuf,
                           struct dma_buf_attachment *attachment)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
                                        buffer->pagecount, 0,
                                        buffer->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
                            struct dma_buf_attachment *attachment)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a = attachment->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(&a->table);
        kfree(a);
}

static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;
        struct sg_table *table = &a->table;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        if (ret)
                return ERR_PTR(-ENOMEM);
        a->mapped = true;
        return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *table,
                                   enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;

        a->mapped = false;
        dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

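/*
 * CPU access bracketing: invalidate or flush any kernel vmap of the buffer
 * and do cache maintenance on every attachment that is currently DMA-mapped.
 */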
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                             enum dma_data_direction direction)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                           enum dma_data_direction direction)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_device(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

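/*
 * Userspace mappings are VM_PFNMAP: nothing is inserted up front, pages are
 * faulted in one at a time from the buffer's page array.
 */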
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct cma_heap_buffer *buffer = vma->vm_private_data;

        if (vmf->pgoff >= buffer->pagecount)
                return VM_FAULT_SIGBUS;

        return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}

static const struct vm_operations_struct dma_heap_vm_ops = {
        .fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

        vma->vm_ops = &dma_heap_vm_ops;
        vma->vm_private_data = buffer;

        return 0;
}

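/*
 * Kernel mappings are reference counted: the first vmap() builds the mapping,
 * later callers take another reference to the same vaddr.
 */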
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
        void *vaddr;

        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;
        int ret = 0;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                iosys_map_set_vaddr(map, buffer->vaddr);
                goto out;
        }

        vaddr = cma_heap_do_vmap(buffer);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto out;
        }
        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        iosys_map_set_vaddr(map, buffer->vaddr);
out:
        mutex_unlock(&buffer->lock);

        return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
        iosys_map_clear(map);
}

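/*
 * Final reference dropped: warn about (and undo) a leaked kernel vmap, then
 * return the page array and the contiguous range to CMA.
 */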
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct cma_heap *cma_heap = buffer->heap;

        if (buffer->vmap_cnt > 0) {
                WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }

        /* free page list */
        kfree(buffer->pages);
        /* release memory */
        cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
        kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
        .attach = cma_heap_attach,
        .detach = cma_heap_detach,
        .map_dma_buf = cma_heap_map_dma_buf,
        .unmap_dma_buf = cma_heap_unmap_dma_buf,
        .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
        .mmap = cma_heap_mmap,
        .vmap = cma_heap_vmap,
        .vunmap = cma_heap_vunmap,
        .release = cma_heap_dma_buf_release,
};

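/*
 * Allocate one physically contiguous block from the backing CMA area, zero
 * it, build the page array used by the sg/mmap/vmap paths, and export the
 * result as a dma-buf.
 */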
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
                                         unsigned long len,
                                         u32 fd_flags,
                                         u64 heap_flags)
{
        struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
        struct cma_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        size_t size = PAGE_ALIGN(len);
        pgoff_t pagecount = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        struct page *cma_pages;
        struct dma_buf *dmabuf;
        int ret = -ENOMEM;
        pgoff_t pg;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->len = size;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
        if (!cma_pages)
                goto free_buffer;

        /* Clear the cma pages */
        if (PageHighMem(cma_pages)) {
                unsigned long nr_clear_pages = pagecount;
                struct page *page = cma_pages;

                while (nr_clear_pages > 0) {
                        void *vaddr = kmap_local_page(page);

                        memset(vaddr, 0, PAGE_SIZE);
                        kunmap_local(vaddr);
                        /*
                         * Avoid wasting time zeroing memory if the process
                         * has been killed by SIGKILL.
                         */
                        if (fatal_signal_pending(current))
                                goto free_cma;
                        page++;
                        nr_clear_pages--;
                }
        } else {
                memset(page_address(cma_pages), 0, size);
        }

        buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
        if (!buffer->pages) {
                ret = -ENOMEM;
                goto free_cma;
        }

        for (pg = 0; pg < pagecount; pg++)
                buffer->pages[pg] = &cma_pages[pg];

        buffer->cma_pages = cma_pages;
        buffer->heap = cma_heap;
        buffer->pagecount = pagecount;

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.ops = &cma_heap_buf_ops;
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }
        return dmabuf;

free_pages:
        kfree(buffer->pages);
free_cma:
        cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
        kfree(buffer);

        return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
        .allocate = cma_heap_allocate,
};

static int __init __add_cma_heap(struct cma *cma, const char *name)
{
        struct dma_heap_export_info exp_info;
        struct cma_heap *cma_heap;

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return -ENOMEM;
        cma_heap->cma = cma;

        exp_info.name = name;
        exp_info.ops = &cma_heap_ops;
        exp_info.priv = cma_heap;

        cma_heap->heap = dma_heap_add(&exp_info);
        if (IS_ERR(cma_heap->heap)) {
                int ret = PTR_ERR(cma_heap->heap);

                kfree(cma_heap);
                return ret;
        }

        return 0;
}

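/*
 * Register the system's default CMA area as a dma-heap. When the legacy
 * Kconfig option is enabled, register the same area a second time under the
 * CMA region's own name, unless that would collide with the default name,
 * for userspace that expects the older heap name.
 */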
static int __init add_default_cma_heap(void)
{
        struct cma *default_cma = dev_get_cma_area(NULL);
        const char *legacy_cma_name;
        int ret;

        if (!default_cma)
                return 0;

        ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
        if (ret)
                return ret;

        if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) {
                legacy_cma_name = cma_get_name(default_cma);
                if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) {
                        pr_warn("legacy name and default name are the same, skipping legacy heap\n");
                        return 0;
                }

                ret = __add_cma_heap(default_cma, legacy_cma_name);
                if (ret)
                        pr_warn("failed to add legacy heap: %pe\n",
                                ERR_PTR(ret));
        }

        return 0;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
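For orientation, here is a minimal userspace sketch (not part of this driver) of how a buffer from this heap is typically obtained: open the heap's character device, request an allocation with DMA_HEAP_IOCTL_ALLOC, then mmap() the returned dma-buf fd with MAP_SHARED, since cma_heap_mmap() above rejects private mappings. The device path assumes the default heap registered by this file (DEFAULT_CMA_NAME) and the usual /dev/dma_heap/ naming; the 4 KiB size and the DMA_BUF_IOCTL_SYNC bracketing (which ends up in the begin/end_cpu_access callbacks above) are illustrative choices, not requirements.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>

int main(void)
{
        /* Device node name assumes the default heap registered above. */
        const char *heap_path = "/dev/dma_heap/default_cma_region";
        struct dma_heap_allocation_data alloc = {
                .len = 4096,
                .fd_flags = O_RDWR | O_CLOEXEC,
        };
        struct dma_buf_sync sync;
        int heap_fd;
        void *p;

        heap_fd = open(heap_path, O_RDONLY | O_CLOEXEC);
        if (heap_fd < 0) {
                perror("open heap");
                return 1;
        }

        /* Ask the heap for a contiguous buffer; alloc.fd receives the dma-buf fd. */
        if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
                perror("DMA_HEAP_IOCTL_ALLOC");
                return 1;
        }

        /* cma_heap_mmap() requires a shared mapping. */
        p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED, alloc.fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Bracket CPU writes so the heap can do cache maintenance. */
        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
        ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

        memset(p, 0xaa, alloc.len);

        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
        ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

        munmap(p, alloc.len);
        close(alloc.fd);
        close(heap_fd);
        return 0;
}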