Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/ivpu/ivpu_gem_userptr.c
38186 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2020-2025 Intel Corporation
4
*/
5
6
#include <linux/dma-buf.h>
7
#include <linux/err.h>
8
#include <linux/highmem.h>
9
#include <linux/mm.h>
10
#include <linux/mman.h>
11
#include <linux/scatterlist.h>
12
#include <linux/slab.h>
13
#include <linux/capability.h>
14
15
#include <drm/drm_device.h>
16
#include <drm/drm_file.h>
17
#include <drm/drm_gem.h>
18
19
#include "ivpu_drv.h"
20
#include "ivpu_gem.h"
21
22
static struct sg_table *
23
ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment,
24
enum dma_data_direction direction)
25
{
26
struct sg_table *sgt = attachment->dmabuf->priv;
27
int ret;
28
29
ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
30
if (ret)
31
return ERR_PTR(ret);
32
33
return sgt;
34
}
35
36
/*
 * dma-buf unmap callback: undo the DMA mapping created by
 * ivpu_gem_userptr_dmabuf_map(). DMA_ATTR_SKIP_CPU_SYNC matches the map
 * side, so no CPU cache maintenance is performed here either.
 */
static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment,
					  struct sg_table *sgt,
					  enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
}
42
43
static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf)
44
{
45
struct sg_table *sgt = dma_buf->priv;
46
struct sg_page_iter page_iter;
47
struct page *page;
48
49
for_each_sgtable_page(sgt, &page_iter, 0) {
50
page = sg_page_iter_page(&page_iter);
51
unpin_user_page(page);
52
}
53
54
sg_free_table(sgt);
55
kfree(sgt);
56
}
57
58
/*
 * Minimal dma-buf ops for a userptr-backed buffer. The sg_table built over
 * the pinned user pages lives in dma_buf->priv; release() unpins the pages
 * and frees the table.
 */
static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = {
	.map_dma_buf = ivpu_gem_userptr_dmabuf_map,
	.unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap,
	.release = ivpu_gem_userptr_dmabuf_release,
};
63
64
/*
 * ivpu_create_userptr_dmabuf() - wrap a user memory range in a dma-buf
 * @vdev: ivpu device (used here for debug logging only)
 * @user_ptr: user virtual address; caller has verified page alignment
 * @size: size in bytes; caller has verified it is non-zero and page aligned
 * @flags: DRM_IVPU_BO_* flags; DRM_IVPU_BO_READ_ONLY drops FOLL_WRITE
 *
 * Long-term-pins the user pages, builds an sg_table over them and exports a
 * dma-buf whose ->priv holds the sg_table. On success, ownership of the
 * sg_table and of the page pins transfers to the dma-buf; both are released
 * by ivpu_gem_userptr_dmabuf_release(). The temporary pages array is freed
 * on all paths.
 *
 * Return: dma-buf pointer on success, ERR_PTR() on failure.
 */
static struct dma_buf *
ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
			   size_t size, uint32_t flags)
{
	struct dma_buf_export_info exp_info = {};
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct page **pages;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret, i, pinned;

	/* Add FOLL_WRITE only if the BO is not read-only */
	if (!(flags & DRM_IVPU_BO_READ_ONLY))
		gup_flags |= FOLL_WRITE;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages);
	if (pinned < 0) {
		ret = pinned;
		ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret);
		goto free_pages_array;
	}

	/* A partial pin is a failure: unpin the pages we did get (0..pinned-1) */
	if (pinned != nr_pages) {
		ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
		ret = -EFAULT;
		goto unpin_pages;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto unpin_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL);
	if (ret) {
		ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret);
		goto free_sgt;
	}

	exp_info.exp_name = "ivpu_userptr_dmabuf";
	exp_info.owner = THIS_MODULE;
	exp_info.ops = &ivpu_gem_userptr_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR | O_CLOEXEC;
	exp_info.priv = sgt;

	dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(dma_buf)) {
		ret = PTR_ERR(dma_buf);
		ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret);
		goto free_sg_table;
	}

	/* sgt and the page pins now belong to the dma-buf; only pages[] is ours */
	kvfree(pages);
	return dma_buf;

	/* Error unwind: reverse order of acquisition */
free_sg_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
unpin_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages_array:
	kvfree(pages);
	return ERR_PTR(ret);
}
137
138
static struct ivpu_bo *
139
ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr,
140
size_t size, uint32_t flags)
141
{
142
struct dma_buf *dma_buf;
143
struct drm_gem_object *obj;
144
struct ivpu_bo *bo;
145
146
dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags);
147
if (IS_ERR(dma_buf))
148
return ERR_CAST(dma_buf);
149
150
obj = ivpu_gem_prime_import(&vdev->drm, dma_buf);
151
if (IS_ERR(obj)) {
152
dma_buf_put(dma_buf);
153
return ERR_CAST(obj);
154
}
155
156
dma_buf_put(dma_buf);
157
158
bo = to_ivpu_bo(obj);
159
bo->flags = flags;
160
161
return bo;
162
}
163
164
int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
165
{
166
struct drm_ivpu_bo_create_from_userptr *args = data;
167
struct ivpu_file_priv *file_priv = file->driver_priv;
168
struct ivpu_device *vdev = to_ivpu_device(dev);
169
void __user *user_ptr = u64_to_user_ptr(args->user_ptr);
170
struct ivpu_bo *bo;
171
int ret;
172
173
if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) {
174
ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags);
175
return -EINVAL;
176
}
177
178
if (!args->user_ptr || !args->size) {
179
ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n",
180
args->user_ptr, args->size);
181
return -EINVAL;
182
}
183
184
if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) {
185
ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n",
186
args->user_ptr, args->size);
187
return -EINVAL;
188
}
189
190
if (!access_ok(user_ptr, args->size)) {
191
ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n",
192
args->user_ptr, args->size);
193
return -EFAULT;
194
}
195
196
bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags);
197
if (IS_ERR(bo))
198
return PTR_ERR(bo);
199
200
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
201
if (ret) {
202
ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
203
bo, file_priv->ctx.id, args->size, args->flags);
204
} else {
205
ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",
206
args->handle, bo->vpu_addr, args->size, bo->flags);
207
args->vpu_addr = bo->vpu_addr;
208
}
209
210
drm_gem_object_put(&bo->base.base);
211
212
return ret;
213
}
214
215