GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/amdxdna_ubuf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2025, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <linux/dma-buf.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h"

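/* Bookkeeping for a dma-buf exported from user-supplied virtual address ranges */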
struct amdxdna_ubuf_priv {
        struct page **pages;
        u64 nr_pages;
        enum amdxdna_ubuf_flag flags;
        struct mm_struct *mm;
};

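/*
 * Build a scatter-gather table over the pinned pages for an importer and,
 * when AMDXDNA_UBUF_FLAG_MAP_DMA is set, DMA-map it for the attached device.
 */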
static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
                                         enum dma_data_direction direction)
{
        struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
        struct sg_table *sg;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
                                        ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto err_free_sg;

        if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
                ret = dma_map_sgtable(attach->dev, sg, direction, 0);
                if (ret)
                        goto err_free_table;
        }

        return sg;

err_free_table:
        sg_free_table(sg);
err_free_sg:
        kfree(sg);
        return ERR_PTR(ret);
}

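/* Undo amdxdna_ubuf_map(): DMA-unmap the table if it was mapped, then free it */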
static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
                               struct sg_table *sg,
                               enum dma_data_direction direction)
{
        struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;

        if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
                dma_unmap_sgtable(attach->dev, sg, direction, 0);

        sg_free_table(sg);
        kfree(sg);
}

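/* Last reference to the dma-buf dropped: unpin pages, undo accounting, free state */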
static void amdxdna_ubuf_release(struct dma_buf *dbuf)
{
        struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

        unpin_user_pages(ubuf->pages, ubuf->nr_pages);
        kvfree(ubuf->pages);
        atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
        mmdrop(ubuf->mm);
        kfree(ubuf);
}

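/* Fault handler for CPU mmap of the dma-buf: insert the PFN of the backing page */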
static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct amdxdna_ubuf_priv *ubuf;
        unsigned long pfn;
        pgoff_t pgoff;

        ubuf = vma->vm_private_data;
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(ubuf->pages[pgoff]);
        return vmf_insert_pfn(vma, vmf->address, pfn);
}

static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
        .fault = amdxdna_ubuf_vm_fault,
};

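/* Set up a PFN-map VMA; pages are inserted lazily by the fault handler above */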
static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
{
        struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

        vma->vm_ops = &amdxdna_ubuf_vm_ops;
        vma->vm_private_data = ubuf;
        vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

        return 0;
}

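/* Provide a virtually contiguous kernel mapping of the pinned pages via vmap() */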
static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
        struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
        void *kva;

        kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
        if (!kva)
                return -EINVAL;

        iosys_map_set_vaddr(map, kva);
        return 0;
}

static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
{
        vunmap(map->vaddr);
}

static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
        .map_dma_buf = amdxdna_ubuf_map,
        .unmap_dma_buf = amdxdna_ubuf_unmap,
        .release = amdxdna_ubuf_release,
        .mmap = amdxdna_ubuf_mmap,
        .vmap = amdxdna_ubuf_vmap,
        .vunmap = amdxdna_ubuf_vunmap,
};

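/*
 * Pin the user VA ranges described by @va_entries and export them as a
 * dma-buf backed by those pages. Pins are long-term and are charged against
 * the caller's RLIMIT_MEMLOCK via mm->pinned_vm.
 */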
struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
                                 enum amdxdna_ubuf_flag flags,
                                 u32 num_entries, void __user *va_entries)
{
        struct amdxdna_dev *xdna = to_xdna_dev(dev);
        unsigned long lock_limit, new_pinned;
        struct amdxdna_drm_va_entry *va_ent;
        struct amdxdna_ubuf_priv *ubuf;
        u32 npages, start = 0;
        struct dma_buf *dbuf;
        int i, ret;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

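        /* Long-term pinning requires permission to lock memory */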
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return ERR_PTR(-ENOMEM);

        ubuf->flags = flags;
        ubuf->mm = current->mm;
        mmgrab(ubuf->mm);

        va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
        if (!va_ent) {
                ret = -ENOMEM;
                goto free_ubuf;
        }

        if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
                XDNA_DBG(xdna, "Access va entries failed");
                ret = -EINVAL;
                goto free_ent;
        }

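        /* Every VA range must be page aligned; sum the lengths to get the export size */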
        for (i = 0, exp_info.size = 0; i < num_entries; i++) {
                if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
                    !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
                        XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
                                 va_ent[i].vaddr, va_ent[i].len);
                        ret = -EINVAL;
                        goto free_ent;
                }

                exp_info.size += va_ent[i].len;
        }

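        /* Charge the pinned pages against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK */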
        ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
                XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
                         new_pinned, lock_limit, capable(CAP_IPC_LOCK));
                ret = -ENOMEM;
                goto sub_pin_cnt;
        }

        ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
        if (!ubuf->pages) {
                ret = -ENOMEM;
                goto sub_pin_cnt;
        }

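        /* Long-term pin the pages backing each VA range */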
        for (i = 0; i < num_entries; i++) {
                npages = va_ent[i].len >> PAGE_SHIFT;

                ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
                                          FOLL_WRITE | FOLL_LONGTERM,
                                          &ubuf->pages[start]);
                if (ret < 0 || ret != npages) {
                        /* Log the actual pin result before rewriting ret */
                        XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
                        ret = -ENOMEM;
                        goto destroy_pages;
                }

                start += ret;
        }

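        /* Export the pinned pages through the dma-buf ops defined above */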
        exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR | O_CLOEXEC;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf)) {
                ret = PTR_ERR(dbuf);
                goto destroy_pages;
        }
        kvfree(va_ent);

        return dbuf;

destroy_pages:
        if (start)
                unpin_user_pages(ubuf->pages, start);
        kvfree(ubuf->pages);
sub_pin_cnt:
        atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
free_ent:
        kvfree(va_ent);
free_ubuf:
        mmdrop(ubuf->mm);
        kfree(ubuf);
        return ERR_PTR(ret);
}