GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/drm/freedreno_bo.c
/*
 * Copyright (C) 2012-2018 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "os/os_mman.h"

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

simple_mtx_t table_lock = _SIMPLE_MTX_INITIALIZER_NP;
void bo_del(struct fd_bo *bo);
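
/* table_lock serializes access to the per-device handle_table and
 * name_table hash tables, as well as each bo's fence list; the functions
 * below that call simple_mtx_assert_locked() expect the caller to already
 * hold it.
 */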

/* set buffer name, and add to table, call w/ table_lock held: */
static void
set_name(struct fd_bo *bo, uint32_t name)
{
   bo->name = name;
   /* add ourself into the name table: */
   _mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo *
lookup_bo(struct hash_table *tbl, uint32_t key)
{
   struct fd_bo *bo = NULL;
   struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
   if (entry) {
      /* found, incr refcnt and return: */
      bo = fd_bo_ref(entry->data);

      /* don't break the bucket if this bo was found in one */
      list_delinit(&bo->list);
   }
   return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct fd_bo *bo;

   simple_mtx_assert_locked(&table_lock);

   bo = dev->funcs->bo_from_handle(dev, size, handle);
   if (!bo) {
      struct drm_gem_close req = {
         .handle = handle,
      };
      drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
      return NULL;
   }
   bo->dev = dev;
   bo->size = size;
   bo->handle = handle;
   bo->iova = bo->funcs->iova(bo);
   bo->flags = FD_RELOC_FLAGS_INIT;

   p_atomic_set(&bo->refcnt, 1);
   list_inithead(&bo->list);
   /* add ourself into the handle table: */
   _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
   return bo;
}

static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
       struct fd_bo_cache *cache)
{
   struct fd_bo *bo = NULL;
   uint32_t handle;
   int ret;

   bo = fd_bo_cache_alloc(cache, &size, flags);
   if (bo)
      return bo;

   ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
   if (ret)
      return NULL;

   simple_mtx_lock(&table_lock);
   bo = bo_from_handle(dev, size, handle);
   simple_mtx_unlock(&table_lock);

   bo->max_fences = 1;
   bo->fences = &bo->_inline_fence;

   VG_BO_ALLOC(bo);

   return bo;
}

struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
   struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
   if (bo)
      bo->bo_reuse = BO_CACHE;
   return bo;
}
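
/* Illustrative sketch (hypothetical caller, using only entry points from
 * this file; callers normally reach _fd_bo_new() through the fd_bo_new()
 * wrapper in freedreno_drmif.h, which also names the buffer):
 *
 *    struct fd_bo *bo = _fd_bo_new(dev, 0x1000, 0);
 *    uint32_t *ptr = fd_bo_map(bo);
 *    if (ptr)
 *       ptr[0] = 0;
 *    fd_bo_del(bo);    (unref; may recycle the bo into dev->bo_cache)
 */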

void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   bo->funcs->set_name(bo, fmt, ap);
}

/* Internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  Because cmdstream bo's get vmap'd
 * on the kernel side, which is expensive, we want to re-use cmdstream
 * bo's only for cmdstream and not for unrelated purposes.
 */
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size)
{
   uint32_t flags = FD_BO_GPUREADONLY;
   struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
   if (bo) {
      bo->bo_reuse = RING_CACHE;
      bo->flags |= FD_RELOC_DUMP;
      fd_bo_set_name(bo, "cmdstream");
   }
   return bo;
}
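
/* Illustrative sketch (hypothetical caller): the ringbuffer code would
 * grab its cmdstream backing store with something like
 *
 *    struct fd_bo *ring_bo = fd_bo_new_ring(dev, 0x8000);
 *
 * so that, on free, the buffer is recycled through dev->ring_cache rather
 * than the general dev->bo_cache.
 */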

struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
   struct fd_bo *bo = NULL;

   simple_mtx_lock(&table_lock);

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   bo = bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   return bo;
}

struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
   int ret, size;
   uint32_t handle;
   struct fd_bo *bo;

   simple_mtx_lock(&table_lock);
   ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
   if (ret) {
      simple_mtx_unlock(&table_lock);
      return NULL;
   }

   bo = lookup_bo(dev->handle_table, handle);
   if (bo)
      goto out_unlock;

   /* lseek() to get bo size */
   size = lseek(fd, 0, SEEK_END);
   lseek(fd, 0, SEEK_CUR);

   bo = bo_from_handle(dev, size, handle);

   VG_BO_ALLOC(bo);

out_unlock:
   simple_mtx_unlock(&table_lock);

   return bo;
}
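
/* Illustrative sketch (hypothetical caller, 'prime_fd' received from some
 * other process or device):
 *
 *    struct fd_bo *imported = fd_bo_from_dmabuf(dev, prime_fd);
 *    if (imported)
 *       ...map it with fd_bo_map(), sync with fd_bo_cpu_prep(), etc...
 *
 * Note the bo size is derived from lseek(SEEK_END) on the dmabuf fd above.
 */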

struct fd_bo *
fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
   struct drm_gem_open req = {
      .name = name,
   };
   struct fd_bo *bo;

   simple_mtx_lock(&table_lock);

   /* check name table first, to see if bo is already open: */
   bo = lookup_bo(dev->name_table, name);
   if (bo)
      goto out_unlock;

   if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
      ERROR_MSG("gem-open failed: %s", strerror(errno));
      goto out_unlock;
   }

   bo = lookup_bo(dev->handle_table, req.handle);
   if (bo)
      goto out_unlock;

   bo = bo_from_handle(dev, req.size, req.handle);
   if (bo) {
      set_name(bo, name);
      VG_BO_ALLOC(bo);
   }

out_unlock:
   simple_mtx_unlock(&table_lock);

   return bo;
}

void
fd_bo_mark_for_dump(struct fd_bo *bo)
{
   bo->flags |= FD_RELOC_DUMP;
}

uint64_t
fd_bo_get_iova(struct fd_bo *bo)
{
   /* ancient kernels did not support this */
   assert(bo->iova != 0);
   return bo->iova;
}

struct fd_bo *
fd_bo_ref(struct fd_bo *bo)
{
   p_atomic_inc(&bo->refcnt);
   return bo;
}

static void
bo_del_or_recycle(struct fd_bo *bo)
{
   struct fd_device *dev = bo->dev;

   simple_mtx_assert_locked(&table_lock);

   if ((bo->bo_reuse == BO_CACHE) &&
       (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
      return;

   if ((bo->bo_reuse == RING_CACHE) &&
       (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
      return;

   bo_del(bo);
}
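
/* Two unref paths follow: fd_bo_del_locked() for callers that already hold
 * table_lock, and fd_bo_del() which takes the lock itself before recycling
 * or freeing the bo.
 */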

void
fd_bo_del_locked(struct fd_bo *bo)
{
   simple_mtx_assert_locked(&table_lock);

   if (!p_atomic_dec_zero(&bo->refcnt))
      return;

   bo_del_or_recycle(bo);
}

void
fd_bo_del(struct fd_bo *bo)
{
   if (!p_atomic_dec_zero(&bo->refcnt))
      return;

   simple_mtx_lock(&table_lock);
   bo_del_or_recycle(bo);
   simple_mtx_unlock(&table_lock);
}

/**
 * Clean up fences, dropping pipe references.  If 'expired' is true, only
 * clean up expired fences.
 *
 * Normally we expect at most a single fence, the exception being bo's
 * shared between contexts.
 */
static void
cleanup_fences(struct fd_bo *bo, bool expired)
{
   simple_mtx_assert_locked(&table_lock);

   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_bo_fence *f = &bo->fences[i];

      if (expired && fd_fence_before(f->pipe->control->fence, f->fence))
         continue;

      fd_pipe_del_locked(f->pipe);
      bo->nr_fences--;

      if (bo->nr_fences > 0) {
         /* Shuffle up the last entry to replace the current slot: */
         bo->fences[i] = bo->fences[bo->nr_fences];
         i--;
      }
   }
}

/* Called under table_lock */
void
bo_del(struct fd_bo *bo)
{
   VG_BO_FREE(bo);

   simple_mtx_assert_locked(&table_lock);

   cleanup_fences(bo, false);
   if (bo->fences != &bo->_inline_fence)
      free(bo->fences);

   if (bo->map)
      os_munmap(bo->map, bo->size);

   /* TODO probably bo's in bucket list get removed from
    * handle table??
    */

   if (bo->handle) {
      struct drm_gem_close req = {
         .handle = bo->handle,
      };
      _mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
      if (bo->name)
         _mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
      drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
   }

   bo->funcs->destroy(bo);
}

static void
bo_flush(struct fd_bo *bo)
{
   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_bo_fence *f = &bo->fences[i];
      fd_pipe_flush(f->pipe, f->fence);
   }
}

int
fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
   if (!bo->name) {
      struct drm_gem_flink req = {
         .handle = bo->handle,
      };
      int ret;

      ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
      if (ret) {
         return ret;
      }

      simple_mtx_lock(&table_lock);
      set_name(bo, req.name);
      simple_mtx_unlock(&table_lock);
      bo->bo_reuse = NO_CACHE;
      bo->shared = true;
      bo_flush(bo);
   }

   *name = bo->name;

   return 0;
}
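
/* Illustrative sketch (hypothetical callers in two processes, each with its
 * own fd_device; flink names are the legacy sharing path, dmabuf being the
 * modern one):
 *
 *    uint32_t name;
 *    if (fd_bo_get_name(bo, &name) == 0)
 *       ...send 'name' to the other process...
 *
 *    struct fd_bo *peer_bo = fd_bo_from_name(other_dev, name);
 */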

uint32_t
fd_bo_handle(struct fd_bo *bo)
{
   bo->bo_reuse = NO_CACHE;
   bo->shared = true;
   bo_flush(bo);
   return bo->handle;
}

int
fd_bo_dmabuf(struct fd_bo *bo)
{
   int ret, prime_fd;

   ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &prime_fd);
   if (ret) {
      ERROR_MSG("failed to get dmabuf fd: %d", ret);
      return ret;
   }

   bo->bo_reuse = NO_CACHE;
   bo->shared = true;
   bo_flush(bo);

   return prime_fd;
}

uint32_t
fd_bo_size(struct fd_bo *bo)
{
   return bo->size;
}

void *
fd_bo_map(struct fd_bo *bo)
{
   if (!bo->map) {
      uint64_t offset;
      int ret;

      ret = bo->funcs->offset(bo, &offset);
      if (ret) {
         return NULL;
      }

      bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        bo->dev->fd, offset);
      if (bo->map == MAP_FAILED) {
         ERROR_MSG("mmap failed: %s", strerror(errno));
         bo->map = NULL;
      }
   }
   return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a quirk of kgsl.. */
int
fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
   if (op & (FD_BO_PREP_NOSYNC | FD_BO_PREP_FLUSH)) {
      simple_mtx_lock(&table_lock);
      enum fd_bo_state state = fd_bo_state(bo);
      simple_mtx_unlock(&table_lock);

      if (state == FD_BO_STATE_IDLE)
         return 0;

      if (op & FD_BO_PREP_FLUSH)
         bo_flush(bo);

      /* If we have *only* been asked to flush, then we aren't really
       * interested in whether shared buffers are busy, so avoid
       * the kernel ioctl.
       */
      if ((state == FD_BO_STATE_BUSY) ||
          (op == FD_BO_PREP_FLUSH))
         return -EBUSY;
   }

   /* In case the bo is referenced by a deferred submit, flush up to the
    * required fence now:
    */
   bo_flush(bo);

   /* FD_BO_PREP_FLUSH is purely a frontend flag, and is not seen/handled
    * by backend or kernel:
    */
   return bo->funcs->cpu_prep(bo, pipe, op & ~FD_BO_PREP_FLUSH);
}
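
/* Illustrative sketch (assumes the FD_BO_PREP_READ flag from
 * freedreno_drmif.h): a blocking CPU readback would look roughly like
 *
 *    if (fd_bo_cpu_prep(bo, pipe, FD_BO_PREP_READ) == 0) {
 *       memcpy(dst, fd_bo_map(bo), fd_bo_size(bo));
 *       fd_bo_cpu_fini(bo);
 *    }
 */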

void
fd_bo_cpu_fini(struct fd_bo *bo)
{
   // TODO until we have cached buffers, the kernel side ioctl does nothing,
   // so just skip it.  When we have cached buffers, we can make the
   // ioctl conditional
   // bo->funcs->cpu_fini(bo);
}

void
fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence)
{
   simple_mtx_assert_locked(&table_lock);

   if (bo->nosync)
      return;

   /* The common case is bo re-used on the same pipe it had previously
    * been used on:
    */
   for (int i = 0; i < bo->nr_fences; i++) {
      struct fd_bo_fence *f = &bo->fences[i];
      if (f->pipe == pipe) {
         assert(fd_fence_before(f->fence, fence));
         f->fence = fence;
         return;
      }
   }

   cleanup_fences(bo, true);

   /* The first time we grow past a single fence, we need some special
    * handling, as we've been using the embedded _inline_fence to avoid
    * a separate allocation:
    */
   if (unlikely((bo->nr_fences == 1) &&
                (bo->fences == &bo->_inline_fence))) {
      bo->nr_fences = bo->max_fences = 0;
      bo->fences = NULL;
      APPEND(bo, fences, bo->_inline_fence);
   }

   APPEND(bo, fences, (struct fd_bo_fence){
      .pipe = fd_pipe_ref_locked(pipe),
      .fence = fence,
   });
}

enum fd_bo_state
fd_bo_state(struct fd_bo *bo)
{
   simple_mtx_assert_locked(&table_lock);

   cleanup_fences(bo, true);

   if (bo->shared || bo->nosync)
      return FD_BO_STATE_UNKNOWN;

   if (!bo->nr_fences)
      return FD_BO_STATE_IDLE;

   return FD_BO_STATE_BUSY;
}