GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_bufmgr.c
1
/*
2
* Copyright © 2017 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice shall be included
12
* in all copies or substantial portions of the Software.
13
*
14
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20
* DEALINGS IN THE SOFTWARE.
21
*/
22
23
/**
24
* @file iris_bufmgr.c
25
*
26
* The Iris buffer manager.
27
*
28
* The buffer manager is the main interface to GEM in the kernel: it
29
* allocates buffer objects (BOs), assigns them virtual addresses, maps
30
* them for CPU access, and recycles idle BOs through a size-bucketed
31
* cache so that commonly sized allocations avoid extra kernel round trips.
32
*/
33
34
#include <xf86drm.h>
35
#include <util/u_atomic.h>
36
#include <fcntl.h>
37
#include <stdio.h>
38
#include <stdlib.h>
39
#include <string.h>
40
#include <unistd.h>
41
#include <assert.h>
42
#include <sys/ioctl.h>
43
#include <sys/mman.h>
44
#include <sys/stat.h>
45
#include <sys/types.h>
46
#include <stdbool.h>
47
#include <time.h>
48
#include <unistd.h>
49
50
#include "errno.h"
51
#include "common/intel_aux_map.h"
52
#include "common/intel_clflush.h"
53
#include "dev/intel_debug.h"
54
#include "common/intel_gem.h"
55
#include "dev/intel_device_info.h"
56
#include "isl/isl.h"
57
#include "main/macros.h"
58
#include "os/os_mman.h"
59
#include "util/debug.h"
60
#include "util/macros.h"
61
#include "util/hash_table.h"
62
#include "util/list.h"
63
#include "util/os_file.h"
64
#include "util/u_dynarray.h"
65
#include "util/vma.h"
66
#include "iris_bufmgr.h"
67
#include "iris_context.h"
68
#include "string.h"
69
70
#include "drm-uapi/i915_drm.h"
71
72
#ifdef HAVE_VALGRIND
73
#include <valgrind.h>
74
#include <memcheck.h>
75
#define VG(x) x
76
#else
77
#define VG(x)
78
#endif
79
80
/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
81
* VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
82
* leaked. All because it does not call VG(cli_free) from its
83
* VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
84
* an allocation, we mark it available for use upon mmapping and remove
85
* it upon unmapping.
86
*/
87
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
88
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
89
90
/* On FreeBSD PAGE_SIZE is already defined in
91
* /usr/include/machine/param.h that is indirectly
92
* included here.
93
*/
94
#ifndef PAGE_SIZE
95
#define PAGE_SIZE 4096
96
#endif
97
98
#define WARN_ONCE(cond, fmt...) do { \
99
if (unlikely(cond)) { \
100
static bool _warned = false; \
101
if (!_warned) { \
102
fprintf(stderr, "WARNING: "); \
103
fprintf(stderr, fmt); \
104
_warned = true; \
105
} \
106
} \
107
} while (0)
108
109
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
110
111
/**
112
* For debugging purposes, this returns a time in seconds.
113
*/
114
static double
115
get_time(void)
116
{
117
struct timespec tp;
118
119
clock_gettime(CLOCK_MONOTONIC, &tp);
120
121
return tp.tv_sec + tp.tv_nsec / 1000000000.0;
122
}
123
124
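/**
* Atomically add @add to *@v unless *@v equals @unless.  Returns true if
* the add was *not* performed; iris_bo_unreference() uses this to detect
* when a reference count is about to reach zero.
*/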
static inline int
125
atomic_add_unless(int *v, int add, int unless)
126
{
127
int c, old;
128
c = p_atomic_read(v);
129
while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
130
c = old;
131
return c == unless;
132
}
133
134
static const char *
135
memzone_name(enum iris_memory_zone memzone)
136
{
137
const char *names[] = {
138
[IRIS_MEMZONE_SHADER] = "shader",
139
[IRIS_MEMZONE_BINDER] = "binder",
140
[IRIS_MEMZONE_BINDLESS] = "scratchsurf",
141
[IRIS_MEMZONE_SURFACE] = "surface",
142
[IRIS_MEMZONE_DYNAMIC] = "dynamic",
143
[IRIS_MEMZONE_OTHER] = "other",
144
[IRIS_MEMZONE_BORDER_COLOR_POOL] = "bordercolor",
145
};
146
assert(memzone < ARRAY_SIZE(names));
147
return names[memzone];
148
}
149
150
struct bo_cache_bucket {
151
/** List of cached BOs. */
152
struct list_head head;
153
154
/** Size of this bucket, in bytes. */
155
uint64_t size;
156
};
157
158
struct bo_export {
159
/** File descriptor associated with a handle export. */
160
int drm_fd;
161
162
/** GEM handle in drm_fd */
163
uint32_t gem_handle;
164
165
struct list_head link;
166
};
167
168
struct iris_memregion {
169
struct drm_i915_gem_memory_class_instance region;
170
uint64_t size;
171
};
172
173
struct iris_bufmgr {
174
/**
175
* List into the list of bufmgr.
176
*/
177
struct list_head link;
178
179
uint32_t refcount;
180
181
int fd;
182
183
simple_mtx_t lock;
184
185
/** Array of lists of cached gem objects of power-of-two sizes */
186
struct bo_cache_bucket cache_bucket[14 * 4];
187
int num_buckets;
188
189
/** Same as cache_bucket, but for local memory gem objects */
190
struct bo_cache_bucket local_cache_bucket[14 * 4];
191
int num_local_buckets;
192
193
time_t time;
194
195
struct hash_table *name_table;
196
struct hash_table *handle_table;
197
198
/**
199
* List of BOs which we've effectively freed, but are hanging on to
200
* until they're idle before closing and returning the VMA.
201
*/
202
struct list_head zombie_list;
203
204
struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
205
206
uint64_t vma_min_align;
207
struct iris_memregion vram, sys;
208
209
bool has_llc:1;
210
bool has_mmap_offset:1;
211
bool has_tiling_uapi:1;
212
bool bo_reuse:1;
213
214
struct intel_aux_map_context *aux_map_ctx;
215
};
216
217
static simple_mtx_t global_bufmgr_list_mutex = _SIMPLE_MTX_INITIALIZER_NP;
218
static struct list_head global_bufmgr_list = {
219
.next = &global_bufmgr_list,
220
.prev = &global_bufmgr_list,
221
};
222
223
static void bo_free(struct iris_bo *bo);
224
225
static struct iris_bo *
226
find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
227
{
228
struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
229
struct iris_bo *bo = entry ? entry->data : NULL;
230
231
if (bo) {
232
assert(iris_bo_is_external(bo));
233
assert(!bo->reusable);
234
235
/* Being non-reusable, the BO cannot be in the cache lists, but it
236
* may be in the zombie list if it had reached zero references, but
237
* we hadn't yet closed it...and then reimported the same BO. If it
238
* is, then remove it since it's now been resurrected.
239
*/
240
if (list_is_linked(&bo->head))
241
list_del(&bo->head);
242
243
iris_bo_reference(bo);
244
}
245
246
return bo;
247
}
248
249
/**
250
* Finds the cache bucket that fits the requested size.
251
* The bucket index is computed directly from the size in O(1),
252
* rather than by iterating through all the buckets.
253
*/
254
static struct bo_cache_bucket *
255
bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size, bool local)
256
{
257
/* Calculating the pages and rounding up to the page size. */
258
const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
259
260
/* Row  Bucket sizes    clz((x-1) | 3)   Row     Column
261
*       in pages                         stride   size
262
*   0:   1  2  3  4 ->  30 30 30 30        4       1
263
*   1:   5  6  7  8 ->  29 29 29 29        4       1
264
*   2:  10 12 14 16 ->  28 28 28 28        8       2
265
*   3:  20 24 28 32 ->  27 27 27 27       16       4
266
*/
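/* Worked example (illustrative; assumes 4 KiB pages and a 32-bit unsigned
* int for __builtin_clz): a 25 KiB request rounds up to 7 pages.
* clz((7 - 1) | 3) == 29, so row == 1, row_max_pages == 8,
* prev_row_max_pages == 4, and col_size_log2 == 0, giving col == 3 and
* index == (1 * 4) + (3 - 1) == 6 -- the 7-page (28 KiB) bucket created
* by init_cache_buckets().
*/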
267
const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
268
const unsigned row_max_pages = 4 << row;
269
270
/* The '& ~2' is the special case for row 1. In row 1, max pages /
271
* 2 is 2, but the previous row maximum is zero (because there is
272
* no previous row). All row maximum sizes are power of 2, so that
273
* is the only case where that bit will be set.
274
*/
275
const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
276
int col_size_log2 = row - 1;
277
col_size_log2 += (col_size_log2 < 0);
278
279
const unsigned col = (pages - prev_row_max_pages +
280
((1 << col_size_log2) - 1)) >> col_size_log2;
281
282
/* Calculating the index based on the row and column. */
283
const unsigned index = (row * 4) + (col - 1);
284
285
int num_buckets = local ? bufmgr->num_local_buckets : bufmgr->num_buckets;
286
struct bo_cache_bucket *buckets = local ?
287
bufmgr->local_cache_bucket : bufmgr->cache_bucket;
288
289
return (index < num_buckets) ? &buckets[index] : NULL;
290
}
291
292
enum iris_memory_zone
293
iris_memzone_for_address(uint64_t address)
294
{
295
STATIC_ASSERT(IRIS_MEMZONE_OTHER_START > IRIS_MEMZONE_DYNAMIC_START);
296
STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
297
STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_BINDLESS_START);
298
STATIC_ASSERT(IRIS_MEMZONE_BINDLESS_START > IRIS_MEMZONE_BINDER_START);
299
STATIC_ASSERT(IRIS_MEMZONE_BINDER_START > IRIS_MEMZONE_SHADER_START);
300
STATIC_ASSERT(IRIS_BORDER_COLOR_POOL_ADDRESS == IRIS_MEMZONE_DYNAMIC_START);
301
302
if (address >= IRIS_MEMZONE_OTHER_START)
303
return IRIS_MEMZONE_OTHER;
304
305
if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
306
return IRIS_MEMZONE_BORDER_COLOR_POOL;
307
308
if (address > IRIS_MEMZONE_DYNAMIC_START)
309
return IRIS_MEMZONE_DYNAMIC;
310
311
if (address >= IRIS_MEMZONE_SURFACE_START)
312
return IRIS_MEMZONE_SURFACE;
313
314
if (address >= IRIS_MEMZONE_BINDLESS_START)
315
return IRIS_MEMZONE_BINDLESS;
316
317
if (address >= IRIS_MEMZONE_BINDER_START)
318
return IRIS_MEMZONE_BINDER;
319
320
return IRIS_MEMZONE_SHADER;
321
}
322
323
/**
324
* Allocate a section of virtual memory for a buffer, assigning an address.
325
*
326
* This uses either the bucket allocator for the given size, or the large
327
* object allocator (util_vma).
328
*/
329
static uint64_t
330
vma_alloc(struct iris_bufmgr *bufmgr,
331
enum iris_memory_zone memzone,
332
uint64_t size,
333
uint64_t alignment)
334
{
335
/* Force minimum alignment based on device requirements */
336
assert((alignment & (alignment - 1)) == 0);
337
alignment = MAX2(alignment, bufmgr->vma_min_align);
338
339
if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
340
return IRIS_BORDER_COLOR_POOL_ADDRESS;
341
342
/* The binder handles its own allocations. Return non-zero here. */
343
if (memzone == IRIS_MEMZONE_BINDER)
344
return IRIS_MEMZONE_BINDER_START;
345
346
uint64_t addr =
347
util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size, alignment);
348
349
assert((addr >> 48ull) == 0);
350
assert((addr % alignment) == 0);
351
352
return intel_canonical_address(addr);
353
}
354
355
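/**
* Return a buffer's virtual address range to the zone allocator it came
* from.  Addresses in the border color pool and the binder zone are
* managed separately, so they are simply ignored here.
*/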
static void
356
vma_free(struct iris_bufmgr *bufmgr,
357
uint64_t address,
358
uint64_t size)
359
{
360
if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
361
return;
362
363
/* Un-canonicalize the address. */
364
address = intel_48b_address(address);
365
366
if (address == 0ull)
367
return;
368
369
enum iris_memory_zone memzone = iris_memzone_for_address(address);
370
371
/* The binder handles its own allocations. */
372
if (memzone == IRIS_MEMZONE_BINDER)
373
return;
374
375
assert(memzone < ARRAY_SIZE(bufmgr->vma_allocator));
376
377
util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
378
}
379
380
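/**
* Ask the kernel whether @bo is still in use by the GPU, updating bo->idle
* as a side effect.  Returns nonzero if the BO is busy.
*/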
int
381
iris_bo_busy(struct iris_bo *bo)
382
{
383
struct iris_bufmgr *bufmgr = bo->bufmgr;
384
struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
385
386
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
387
if (ret == 0) {
388
bo->idle = !busy.busy;
389
return busy.busy;
390
}
391
return false;
392
}
393
394
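/**
* Set the kernel's purgeability hint for @bo's backing pages, e.g.
* I915_MADV_WILLNEED or I915_MADV_DONTNEED.  Returns whether the pages are
* still retained (i.e. have not been purged).
*/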
int
395
iris_bo_madvise(struct iris_bo *bo, int state)
396
{
397
struct drm_i915_gem_madvise madv = {
398
.handle = bo->gem_handle,
399
.madv = state,
400
.retained = 1,
401
};
402
403
intel_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
404
405
return madv.retained;
406
}
407
408
static struct iris_bo *
409
bo_calloc(void)
410
{
411
struct iris_bo *bo = calloc(1, sizeof(*bo));
412
if (!bo)
413
return NULL;
414
415
list_inithead(&bo->exports);
416
417
bo->hash = _mesa_hash_pointer(bo);
418
419
return bo;
420
}
421
422
static void
423
bo_unmap(struct iris_bo *bo)
424
{
425
VG_NOACCESS(bo->map, bo->size);
426
os_munmap(bo->map, bo->size);
427
bo->map = NULL;
428
}
429
430
static struct iris_bo *
431
alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
432
struct bo_cache_bucket *bucket,
433
uint32_t alignment,
434
enum iris_memory_zone memzone,
435
enum iris_mmap_mode mmap_mode,
436
unsigned flags,
437
bool match_zone)
438
{
439
if (!bucket)
440
return NULL;
441
442
struct iris_bo *bo = NULL;
443
444
list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
445
/* Find one that's got the right mapping type. We used to swap maps
446
* around but the kernel doesn't allow this on discrete GPUs.
447
*/
448
if (mmap_mode != cur->mmap_mode)
449
continue;
450
451
/* Try a little harder to find one that's already in the right memzone */
452
if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
453
continue;
454
455
/* If the last BO in the cache is busy, there are no idle BOs. Bail,
456
* either falling back to a non-matching memzone, or if that fails,
457
* allocating a fresh buffer.
458
*/
459
if (iris_bo_busy(cur))
460
return NULL;
461
462
list_del(&cur->head);
463
464
/* Tell the kernel we need this BO. If it still exists, we're done! */
465
if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
466
bo = cur;
467
break;
468
}
469
470
/* This BO was purged, throw it out and keep looking. */
471
bo_free(cur);
472
}
473
474
if (!bo)
475
return NULL;
476
477
if (bo->aux_map_address) {
478
/* This buffer was associated with an aux-buffer range. We make sure
479
* that buffers are not reused from the cache while the buffer is (busy)
480
* being used by an executing batch. Since we are here, the buffer is no
481
* longer being used by a batch and the buffer was deleted (in order to
482
* end up in the cache). Therefore its old aux-buffer range can be
483
* removed from the aux-map.
484
*/
485
if (bo->bufmgr->aux_map_ctx)
486
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
487
bo->size);
488
bo->aux_map_address = 0;
489
}
490
491
/* If the cached BO isn't in the right memory zone, or the alignment
492
* isn't sufficient, free the old memory and assign it a new address.
493
*/
494
if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
495
bo->gtt_offset % alignment != 0) {
496
vma_free(bufmgr, bo->gtt_offset, bo->size);
497
bo->gtt_offset = 0ull;
498
}
499
500
/* Zero the contents if necessary. If this fails, fall back to
501
* allocating a fresh BO, which will always be zeroed by the kernel.
502
*/
503
if (flags & BO_ALLOC_ZEROED) {
504
void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
505
if (map) {
506
memset(map, 0, bo->size);
507
} else {
508
bo_free(bo);
509
return NULL;
510
}
511
}
512
513
return bo;
514
}
515
516
static struct iris_bo *
517
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, bool local)
518
{
519
struct iris_bo *bo = bo_calloc();
520
if (!bo)
521
return NULL;
522
523
/* If the device has VRAM, there are multiple memory regions, and we must
524
* choose which ones this allocation may be placed in.
525
*/
526
if (bufmgr->vram.size > 0) {
527
/* All new BOs we get from the kernel are zeroed, so we don't need to
528
* worry about that here.
529
*/
530
struct drm_i915_gem_memory_class_instance regions[2];
531
uint32_t nregions = 0;
532
if (local) {
533
/* For vram allocations, still use system memory as a fallback. */
534
regions[nregions++] = bufmgr->vram.region;
535
regions[nregions++] = bufmgr->sys.region;
536
} else {
537
regions[nregions++] = bufmgr->sys.region;
538
}
539
540
struct drm_i915_gem_create_ext_memory_regions ext_regions = {
541
.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
542
.num_regions = nregions,
543
.regions = (uintptr_t)regions,
544
};
545
546
struct drm_i915_gem_create_ext create = {
547
.size = bo_size,
548
.extensions = (uintptr_t)&ext_regions,
549
};
550
551
/* It is safe to use GEM_CREATE_EXT without checking for support: we only
552
* reach this branch when discrete memory is available, and any kernel
553
* exposing discrete memory already supports GEM_CREATE_EXT.
554
*/
555
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) != 0) {
556
free(bo);
557
return NULL;
558
}
559
bo->gem_handle = create.handle;
560
} else {
561
struct drm_i915_gem_create create = { .size = bo_size };
562
563
/* All new BOs we get from the kernel are zeroed, so we don't need to
564
* worry about that here.
565
*/
566
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
567
free(bo);
568
return NULL;
569
}
570
bo->gem_handle = create.handle;
571
}
572
573
bo->bufmgr = bufmgr;
574
bo->size = bo_size;
575
bo->idle = true;
576
bo->local = local;
577
578
/* Calling set_domain() will allocate pages for the BO outside of the
579
* struct mutex lock in the kernel, which is more efficient than waiting
580
* to create them during the first execbuf that uses the BO.
581
*/
582
struct drm_i915_gem_set_domain sd = {
583
.handle = bo->gem_handle,
584
.read_domains = I915_GEM_DOMAIN_CPU,
585
.write_domain = 0,
586
};
587
588
if (intel_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
589
bo_free(bo);
590
return NULL;
591
}
592
593
return bo;
594
}
595
596
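/**
* Allocate a buffer object, reusing an idle cached BO when possible.
*
* The request is rounded up to a cache bucket size (or to a page multiple
* when no bucket fits), and a fresh GEM object is created only when no
* suitable cached BO exists.  A minimal usage sketch (illustrative only;
* the name and size are arbitrary):
*
*    struct iris_bo *bo =
*       iris_bo_alloc(bufmgr, "scratch", 4096, 1, IRIS_MEMZONE_OTHER, 0);
*    void *map = iris_bo_map(NULL, bo, MAP_WRITE);
*    memset(map, 0, 4096);
*    iris_bo_unreference(bo);
*/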
struct iris_bo *
597
iris_bo_alloc(struct iris_bufmgr *bufmgr,
598
const char *name,
599
uint64_t size,
600
uint32_t alignment,
601
enum iris_memory_zone memzone,
602
unsigned flags)
603
{
604
struct iris_bo *bo;
605
unsigned int page_size = getpagesize();
606
bool local = bufmgr->vram.size > 0 &&
607
!(flags & BO_ALLOC_COHERENT || flags & BO_ALLOC_SMEM);
608
struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size, local);
609
610
/* Round the size up to the bucket size, or if we don't have caching
611
* at this size, a multiple of the page size.
612
*/
613
uint64_t bo_size =
614
bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);
615
616
bool is_coherent = bufmgr->has_llc || (flags & BO_ALLOC_COHERENT);
617
enum iris_mmap_mode mmap_mode =
618
!local && is_coherent ? IRIS_MMAP_WB : IRIS_MMAP_WC;
619
620
simple_mtx_lock(&bufmgr->lock);
621
622
/* Get a buffer out of the cache if available. First, we try to find
623
* one with a matching memory zone so we can avoid reallocating VMA.
624
*/
625
bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
626
flags, true);
627
628
/* If that fails, we try for any cached BO, without matching memzone. */
629
if (!bo) {
630
bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
631
flags, false);
632
}
633
634
simple_mtx_unlock(&bufmgr->lock);
635
636
if (!bo) {
637
bo = alloc_fresh_bo(bufmgr, bo_size, local);
638
if (!bo)
639
return NULL;
640
}
641
642
if (bo->gtt_offset == 0ull) {
643
simple_mtx_lock(&bufmgr->lock);
644
bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, alignment);
645
simple_mtx_unlock(&bufmgr->lock);
646
647
if (bo->gtt_offset == 0ull)
648
goto err_free;
649
}
650
651
bo->name = name;
652
p_atomic_set(&bo->refcount, 1);
653
bo->reusable = bucket && bufmgr->bo_reuse;
654
bo->index = -1;
655
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
656
657
/* By default, capture all driver-internal buffers like shader kernels,
658
* surface states, dynamic states, border colors, and so on.
659
*/
660
if (memzone < IRIS_MEMZONE_OTHER)
661
bo->kflags |= EXEC_OBJECT_CAPTURE;
662
663
assert(bo->map == NULL || bo->mmap_mode == mmap_mode);
664
bo->mmap_mode = mmap_mode;
665
666
/* On integrated GPUs, enable snooping to ensure coherency if needed.
667
* For discrete, we instead use SMEM and avoid WB maps for coherency.
668
*/
669
if ((flags & BO_ALLOC_COHERENT) &&
670
!bufmgr->has_llc && bufmgr->vram.size == 0) {
671
struct drm_i915_gem_caching arg = {
672
.handle = bo->gem_handle,
673
.caching = 1,
674
};
675
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) != 0)
676
goto err_free;
677
678
bo->reusable = false;
679
}
680
681
DBG("bo_create: buf %d (%s) (%s memzone) (%s) %llub\n", bo->gem_handle,
682
bo->name, memzone_name(memzone), bo->local ? "local" : "system",
683
(unsigned long long) size);
684
685
return bo;
686
687
err_free:
688
bo_free(bo);
689
return NULL;
690
}
691
692
struct iris_bo *
693
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
694
void *ptr, size_t size,
695
enum iris_memory_zone memzone)
696
{
697
struct drm_gem_close close = { 0, };
698
struct iris_bo *bo;
699
700
bo = bo_calloc();
701
if (!bo)
702
return NULL;
703
704
struct drm_i915_gem_userptr arg = {
705
.user_ptr = (uintptr_t)ptr,
706
.user_size = size,
707
};
708
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
709
goto err_free;
710
bo->gem_handle = arg.handle;
711
712
/* Check the buffer for validity before we try and use it in a batch */
713
struct drm_i915_gem_set_domain sd = {
714
.handle = bo->gem_handle,
715
.read_domains = I915_GEM_DOMAIN_CPU,
716
};
717
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
718
goto err_close;
719
720
bo->name = name;
721
bo->size = size;
722
bo->map = ptr;
723
724
bo->bufmgr = bufmgr;
725
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
726
727
simple_mtx_lock(&bufmgr->lock);
728
bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
729
simple_mtx_unlock(&bufmgr->lock);
730
731
if (bo->gtt_offset == 0ull)
732
goto err_close;
733
734
p_atomic_set(&bo->refcount, 1);
735
bo->userptr = true;
736
bo->index = -1;
737
bo->idle = true;
738
bo->mmap_mode = IRIS_MMAP_WB;
739
740
return bo;
741
742
err_close:
743
close.handle = bo->gem_handle;
744
intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
745
err_free:
746
free(bo);
747
return NULL;
748
}
749
750
/**
751
* Returns a iris_bo wrapping the given buffer object handle.
752
*
753
* This can be used when one application needs to pass a buffer object
754
* to another.
755
*/
756
struct iris_bo *
757
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
758
const char *name, unsigned int handle)
759
{
760
struct iris_bo *bo;
761
762
/* At the moment most applications only have a few named BOs.
763
* For instance, in a DRI client only the render buffers passed
764
* between X and the client are named. And since X returns the
765
* alternating names for the front/back buffers, a linear search
766
* provides a sufficiently fast match.
767
*/
768
simple_mtx_lock(&bufmgr->lock);
769
bo = find_and_ref_external_bo(bufmgr->name_table, handle);
770
if (bo)
771
goto out;
772
773
struct drm_gem_open open_arg = { .name = handle };
774
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
775
if (ret != 0) {
776
DBG("Couldn't reference %s handle 0x%08x: %s\n",
777
name, handle, strerror(errno));
778
bo = NULL;
779
goto out;
780
}
781
/* Now see if someone has used a prime handle to get this
782
* object from the kernel before by looking through the list
783
* again for a matching gem_handle
784
*/
785
bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
786
if (bo)
787
goto out;
788
789
bo = bo_calloc();
790
if (!bo)
791
goto out;
792
793
p_atomic_set(&bo->refcount, 1);
794
795
bo->size = open_arg.size;
796
bo->bufmgr = bufmgr;
797
bo->gem_handle = open_arg.handle;
798
bo->name = name;
799
bo->global_name = handle;
800
bo->reusable = false;
801
bo->imported = true;
802
bo->mmap_mode = IRIS_MMAP_WC;
803
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
804
bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
805
806
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
807
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
808
809
DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
810
811
out:
812
simple_mtx_unlock(&bufmgr->lock);
813
return bo;
814
}
815
816
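/**
* Actually close @bo: drop it from the name/handle tables, close any
* per-device export handles, close the GEM handle, remove its aux-map
* range, and return its VMA for reuse.  Callers must ensure the BO is
* idle; bo_free() defers busy BOs to the zombie list instead.
*/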
static void
817
bo_close(struct iris_bo *bo)
818
{
819
struct iris_bufmgr *bufmgr = bo->bufmgr;
820
821
if (iris_bo_is_external(bo)) {
822
struct hash_entry *entry;
823
824
if (bo->global_name) {
825
entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
826
_mesa_hash_table_remove(bufmgr->name_table, entry);
827
}
828
829
entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
830
_mesa_hash_table_remove(bufmgr->handle_table, entry);
831
832
list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
833
struct drm_gem_close close = { .handle = export->gem_handle };
834
intel_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
835
836
list_del(&export->link);
837
free(export);
838
}
839
} else {
840
assert(list_is_empty(&bo->exports));
841
}
842
843
/* Close this object */
844
struct drm_gem_close close = { .handle = bo->gem_handle };
845
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
846
if (ret != 0) {
847
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
848
bo->gem_handle, bo->name, strerror(errno));
849
}
850
851
if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
852
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
853
bo->size);
854
}
855
856
/* Return the VMA for reuse */
857
vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
858
859
free(bo);
860
}
861
862
static void
863
bo_free(struct iris_bo *bo)
864
{
865
struct iris_bufmgr *bufmgr = bo->bufmgr;
866
867
if (!bo->userptr && bo->map)
868
bo_unmap(bo);
869
870
if (bo->idle) {
871
bo_close(bo);
872
} else {
873
/* Defer closing the GEM BO and returning the VMA for reuse until the
874
* BO is idle. Just move it to the dead list for now.
875
*/
876
list_addtail(&bo->head, &bufmgr->zombie_list);
877
}
878
}
879
880
/** Frees all cached buffers significantly older than @time. */
881
static void
882
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
883
{
884
int i;
885
886
if (bufmgr->time == time)
887
return;
888
889
for (i = 0; i < bufmgr->num_buckets; i++) {
890
struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
891
892
list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
893
if (time - bo->free_time <= 1)
894
break;
895
896
list_del(&bo->head);
897
898
bo_free(bo);
899
}
900
}
901
902
for (i = 0; i < bufmgr->num_local_buckets; i++) {
903
struct bo_cache_bucket *bucket = &bufmgr->local_cache_bucket[i];
904
905
list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
906
if (time - bo->free_time <= 1)
907
break;
908
909
list_del(&bo->head);
910
911
bo_free(bo);
912
}
913
}
914
915
list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
916
/* Stop once we reach a busy BO - all others past this point were
917
* freed more recently so are likely also busy.
918
*/
919
if (!bo->idle && iris_bo_busy(bo))
920
break;
921
922
list_del(&bo->head);
923
bo_close(bo);
924
}
925
926
bufmgr->time = time;
927
}
928
929
static void
930
bo_unreference_final(struct iris_bo *bo, time_t time)
931
{
932
struct iris_bufmgr *bufmgr = bo->bufmgr;
933
struct bo_cache_bucket *bucket;
934
935
DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
936
937
bucket = NULL;
938
if (bo->reusable)
939
bucket = bucket_for_size(bufmgr, bo->size, bo->local);
940
/* Put the buffer into our internal cache for reuse if we can. */
941
if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
942
bo->free_time = time;
943
bo->name = NULL;
944
945
list_addtail(&bo->head, &bucket->head);
946
} else {
947
bo_free(bo);
948
}
949
}
950
951
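/**
* Drop a reference to @bo, caching or freeing it once the count hits zero.
*
* The common case decrements the reference count without taking the
* bufmgr lock; the lock is only taken for the final unreference.
*/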
void
952
iris_bo_unreference(struct iris_bo *bo)
953
{
954
if (bo == NULL)
955
return;
956
957
assert(p_atomic_read(&bo->refcount) > 0);
958
959
if (atomic_add_unless(&bo->refcount, -1, 1)) {
960
struct iris_bufmgr *bufmgr = bo->bufmgr;
961
struct timespec time;
962
963
clock_gettime(CLOCK_MONOTONIC, &time);
964
965
simple_mtx_lock(&bufmgr->lock);
966
967
if (p_atomic_dec_zero(&bo->refcount)) {
968
bo_unreference_final(bo, time.tv_sec);
969
cleanup_bo_cache(bufmgr, time.tv_sec);
970
}
971
972
simple_mtx_unlock(&bufmgr->lock);
973
}
974
}
975
976
static void
977
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
978
struct iris_bo *bo,
979
const char *action)
980
{
981
bool busy = dbg && !bo->idle;
982
double elapsed = unlikely(busy) ? -get_time() : 0.0;
983
984
iris_bo_wait_rendering(bo);
985
986
if (unlikely(busy)) {
987
elapsed += get_time();
988
if (elapsed > 1e-5) /* 0.01ms */ {
989
perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
990
action, bo->name, elapsed * 1000);
991
}
992
}
993
}
994
995
static void
996
print_flags(unsigned flags)
997
{
998
if (flags & MAP_READ)
999
DBG("READ ");
1000
if (flags & MAP_WRITE)
1001
DBG("WRITE ");
1002
if (flags & MAP_ASYNC)
1003
DBG("ASYNC ");
1004
if (flags & MAP_PERSISTENT)
1005
DBG("PERSISTENT ");
1006
if (flags & MAP_COHERENT)
1007
DBG("COHERENT ");
1008
if (flags & MAP_RAW)
1009
DBG("RAW ");
1010
DBG("\n");
1011
}
1012
1013
static void *
1014
iris_bo_gem_mmap_legacy(struct pipe_debug_callback *dbg, struct iris_bo *bo)
1015
{
1016
struct iris_bufmgr *bufmgr = bo->bufmgr;
1017
1018
struct drm_i915_gem_mmap mmap_arg = {
1019
.handle = bo->gem_handle,
1020
.size = bo->size,
1021
.flags = bo->mmap_mode == IRIS_MMAP_WC ? I915_MMAP_WC : 0,
1022
};
1023
1024
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
1025
if (ret != 0) {
1026
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1027
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1028
return NULL;
1029
}
1030
void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
1031
1032
return map;
1033
}
1034
1035
static void *
1036
iris_bo_gem_mmap_offset(struct pipe_debug_callback *dbg, struct iris_bo *bo)
1037
{
1038
struct iris_bufmgr *bufmgr = bo->bufmgr;
1039
1040
struct drm_i915_gem_mmap_offset mmap_arg = {
1041
.handle = bo->gem_handle,
1042
};
1043
1044
if (bo->mmap_mode == IRIS_MMAP_WB)
1045
mmap_arg.flags = I915_MMAP_OFFSET_WB;
1046
else if (bo->mmap_mode == IRIS_MMAP_WC)
1047
mmap_arg.flags = I915_MMAP_OFFSET_WC;
1048
else
1049
mmap_arg.flags = I915_MMAP_OFFSET_UC;
1050
1051
/* Get the fake offset back */
1052
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
1053
if (ret != 0) {
1054
DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
1055
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1056
return NULL;
1057
}
1058
1059
/* And map it */
1060
void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
1061
bufmgr->fd, mmap_arg.offset);
1062
if (map == MAP_FAILED) {
1063
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1064
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1065
return NULL;
1066
}
1067
1068
return map;
1069
}
1070
1071
void *
1072
iris_bo_map(struct pipe_debug_callback *dbg,
1073
struct iris_bo *bo, unsigned flags)
1074
{
1075
struct iris_bufmgr *bufmgr = bo->bufmgr;
1076
1077
if (!bo->map) {
1078
DBG("iris_bo_map: %d (%s)\n", bo->gem_handle, bo->name);
1079
void *map = bufmgr->has_mmap_offset ? iris_bo_gem_mmap_offset(dbg, bo)
1080
: iris_bo_gem_mmap_legacy(dbg, bo);
1081
if (!map) {
1082
return NULL;
1083
}
1084
1085
VG_DEFINED(map, bo->size);
1086
1087
if (p_atomic_cmpxchg(&bo->map, NULL, map)) {
1088
VG_NOACCESS(map, bo->size);
1089
os_munmap(map, bo->size);
1090
}
1091
}
1092
assert(bo->map);
1093
1094
DBG("iris_bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map);
1095
print_flags(flags);
1096
1097
if (!(flags & MAP_ASYNC)) {
1098
bo_wait_with_stall_warning(dbg, bo, "memory mapping");
1099
}
1100
1101
return bo->map;
1102
}
1103
1104
/** Waits for all GPU rendering with the object to have completed. */
1105
void
1106
iris_bo_wait_rendering(struct iris_bo *bo)
1107
{
1108
/* We require a kernel recent enough for WAIT_IOCTL support.
1109
* See intel_init_bufmgr()
1110
*/
1111
iris_bo_wait(bo, -1);
1112
}
1113
1114
/**
1115
* Waits on a BO for the given amount of time.
1116
*
1117
* @bo: buffer object to wait for
1118
* @timeout_ns: amount of time to wait in nanoseconds.
1119
* If value is less than 0, an infinite wait will occur.
1120
*
1121
* Returns 0 if the wait was successful, i.e. the last batch referencing the
1122
* object completed within the allotted time.  Otherwise, a negative value
1123
* describing the error is returned.  Of particular interest is -ETIME, which
1124
* means the wait failed to yield the desired result before the timeout.
1125
*
1126
* Similar to iris_bo_wait_rendering except a timeout parameter allows
1127
* the operation to give up after a certain amount of time. Another subtle
1128
* difference is the internal locking semantics are different (this variant does
1129
* not hold the lock for the duration of the wait). This makes the wait subject
1130
* to a larger userspace race window.
1131
*
1132
* The implementation shall wait until the object is no longer actively
1133
* referenced within a batch buffer at the time of the call.  The wait does
1134
* not guarantee that the buffer will not be re-issued by another thread or
1135
* through a flinked handle.  Userspace must make sure this race does not
1136
* occur if such precision is important.
1137
*
1138
* Note that some kernels have broken the promise of an infinite wait for
1139
* negative values; upgrade to the latest stable kernel if this is the case.
1140
*/
1141
int
1142
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
1143
{
1144
struct iris_bufmgr *bufmgr = bo->bufmgr;
1145
1146
/* If we know it's idle, don't bother with the kernel round trip */
1147
if (bo->idle && !iris_bo_is_external(bo))
1148
return 0;
1149
1150
struct drm_i915_gem_wait wait = {
1151
.bo_handle = bo->gem_handle,
1152
.timeout_ns = timeout_ns,
1153
};
1154
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1155
if (ret != 0)
1156
return -errno;
1157
1158
bo->idle = true;
1159
1160
return ret;
1161
}
1162
1163
static void
1164
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
1165
{
1166
/* Free aux-map buffers */
1167
intel_aux_map_finish(bufmgr->aux_map_ctx);
1168
1169
/* bufmgr will no longer try to free VMA entries in the aux-map */
1170
bufmgr->aux_map_ctx = NULL;
1171
1172
simple_mtx_destroy(&bufmgr->lock);
1173
1174
/* Free any cached buffer objects we were going to reuse */
1175
for (int i = 0; i < bufmgr->num_buckets; i++) {
1176
struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1177
1178
list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
1179
list_del(&bo->head);
1180
1181
bo_free(bo);
1182
}
1183
}
1184
1185
/* Close any buffer objects on the dead list. */
1186
list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
1187
list_del(&bo->head);
1188
bo_close(bo);
1189
}
1190
1191
_mesa_hash_table_destroy(bufmgr->name_table, NULL);
1192
_mesa_hash_table_destroy(bufmgr->handle_table, NULL);
1193
1194
for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
1195
if (z != IRIS_MEMZONE_BINDER)
1196
util_vma_heap_finish(&bufmgr->vma_allocator[z]);
1197
}
1198
1199
close(bufmgr->fd);
1200
1201
free(bufmgr);
1202
}
1203
1204
int
1205
iris_gem_get_tiling(struct iris_bo *bo, uint32_t *tiling)
1206
{
1207
struct iris_bufmgr *bufmgr = bo->bufmgr;
1208
1209
if (!bufmgr->has_tiling_uapi) {
1210
*tiling = I915_TILING_NONE;
1211
return 0;
1212
}
1213
1214
struct drm_i915_gem_get_tiling ti = { .handle = bo->gem_handle };
1215
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &ti);
1216
1217
if (ret) {
1218
DBG("gem_get_tiling failed for BO %u: %s\n",
1219
bo->gem_handle, strerror(errno));
1220
}
1221
1222
*tiling = ti.tiling_mode;
1223
1224
return ret;
1225
}
1226
1227
int
1228
iris_gem_set_tiling(struct iris_bo *bo, const struct isl_surf *surf)
1229
{
1230
struct iris_bufmgr *bufmgr = bo->bufmgr;
1231
uint32_t tiling_mode = isl_tiling_to_i915_tiling(surf->tiling);
1232
int ret;
1233
1234
/* If we can't do map_gtt, the set/get_tiling API isn't useful. And it's
1235
* actually not supported by the kernel in those cases.
1236
*/
1237
if (!bufmgr->has_tiling_uapi)
1238
return 0;
1239
1240
/* GEM_SET_TILING is slightly broken and overwrites the input on the
1241
* error path, so we have to open code intel_ioctl().
1242
*/
1243
do {
1244
struct drm_i915_gem_set_tiling set_tiling = {
1245
.handle = bo->gem_handle,
1246
.tiling_mode = tiling_mode,
1247
.stride = surf->row_pitch_B,
1248
};
1249
ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1250
} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1251
1252
if (ret) {
1253
DBG("gem_set_tiling failed for BO %u: %s\n",
1254
bo->gem_handle, strerror(errno));
1255
}
1256
1257
return ret;
1258
}
1259
1260
struct iris_bo *
1261
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
1262
{
1263
uint32_t handle;
1264
struct iris_bo *bo;
1265
1266
simple_mtx_lock(&bufmgr->lock);
1267
int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1268
if (ret) {
1269
DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
1270
strerror(errno));
1271
simple_mtx_unlock(&bufmgr->lock);
1272
return NULL;
1273
}
1274
1275
/*
1276
* See if the kernel has already returned this buffer to us. Just as
1277
* for named buffers, we must not create two BOs pointing at the same
1278
* kernel object.
1279
*/
1280
bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
1281
if (bo)
1282
goto out;
1283
1284
bo = bo_calloc();
1285
if (!bo)
1286
goto out;
1287
1288
p_atomic_set(&bo->refcount, 1);
1289
1290
/* Determine size of bo. The fd-to-handle ioctl really should
1291
* return the size, but it doesn't. If we have kernel 3.12 or
1292
* later, we can lseek on the prime fd to get the size. Older
1293
* kernels will just fail, in which case bo->size is simply left
1294
* at zero. */
1295
ret = lseek(prime_fd, 0, SEEK_END);
1296
if (ret != -1)
1297
bo->size = ret;
1298
1299
bo->bufmgr = bufmgr;
1300
bo->name = "prime";
1301
bo->reusable = false;
1302
bo->imported = true;
1303
bo->mmap_mode = IRIS_MMAP_WC;
1304
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
1305
1306
/* From the Bspec, Memory Compression - Gfx12:
1307
*
1308
* The base address for the surface has to be 64K page aligned and the
1309
* surface is expected to be padded in the virtual domain to be 4 4K
1310
* pages.
1311
*
1312
* The dmabuf may contain a compressed surface. Align the BO to 64KB just
1313
* in case. We always align to 64KB even on platforms where we don't need
1314
* to, because it's a fairly reasonable thing to do anyway.
1315
*/
1316
bo->gtt_offset =
1317
vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
1318
1319
bo->gem_handle = handle;
1320
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1321
1322
out:
1323
simple_mtx_unlock(&bufmgr->lock);
1324
return bo;
1325
}
1326
1327
static void
1328
iris_bo_mark_exported_locked(struct iris_bo *bo)
1329
{
1330
if (!iris_bo_is_external(bo))
1331
_mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
1332
1333
if (!bo->exported) {
1334
/* If a BO is going to be used externally, it could be sent to the
1335
* display HW. So make sure our CPU mappings don't assume cache
1336
* coherency since display is outside that cache.
1337
*/
1338
bo->exported = true;
1339
bo->reusable = false;
1340
}
1341
}
1342
1343
void
1344
iris_bo_mark_exported(struct iris_bo *bo)
1345
{
1346
struct iris_bufmgr *bufmgr = bo->bufmgr;
1347
1348
if (bo->exported) {
1349
assert(!bo->reusable);
1350
return;
1351
}
1352
1353
simple_mtx_lock(&bufmgr->lock);
1354
iris_bo_mark_exported_locked(bo);
1355
simple_mtx_unlock(&bufmgr->lock);
1356
}
1357
1358
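/**
* Export @bo as a dma-buf file descriptor so it can be shared across
* devices or processes.  A minimal sketch pairing this with the import
* path (illustrative only; "other_bufmgr" is hypothetical and error
* handling is omitted):
*
*    int fd = -1;
*    if (iris_bo_export_dmabuf(bo, &fd) == 0) {
*       struct iris_bo *imported = iris_bo_import_dmabuf(other_bufmgr, fd);
*       close(fd);
*       ...
*       iris_bo_unreference(imported);
*    }
*
* Exporting marks the BO as external and non-reusable.
*/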
int
1359
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
1360
{
1361
struct iris_bufmgr *bufmgr = bo->bufmgr;
1362
1363
iris_bo_mark_exported(bo);
1364
1365
if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1366
DRM_CLOEXEC | DRM_RDWR, prime_fd) != 0)
1367
return -errno;
1368
1369
return 0;
1370
}
1371
1372
uint32_t
1373
iris_bo_export_gem_handle(struct iris_bo *bo)
1374
{
1375
iris_bo_mark_exported(bo);
1376
1377
return bo->gem_handle;
1378
}
1379
1380
int
1381
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
1382
{
1383
struct iris_bufmgr *bufmgr = bo->bufmgr;
1384
1385
if (!bo->global_name) {
1386
struct drm_gem_flink flink = { .handle = bo->gem_handle };
1387
1388
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1389
return -errno;
1390
1391
simple_mtx_lock(&bufmgr->lock);
1392
if (!bo->global_name) {
1393
iris_bo_mark_exported_locked(bo);
1394
bo->global_name = flink.name;
1395
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1396
}
1397
simple_mtx_unlock(&bufmgr->lock);
1398
}
1399
1400
*name = bo->global_name;
1401
return 0;
1402
}
1403
1404
int
1405
iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
1406
uint32_t *out_handle)
1407
{
1408
/* Only add the new GEM handle to the list of exports if it belongs to a
1409
* different GEM device. Otherwise we might close the same buffer multiple
1410
* times.
1411
*/
1412
struct iris_bufmgr *bufmgr = bo->bufmgr;
1413
int ret = os_same_file_description(drm_fd, bufmgr->fd);
1414
WARN_ONCE(ret < 0,
1415
"Kernel has no file descriptor comparison support: %s\n",
1416
strerror(errno));
1417
if (ret == 0) {
1418
*out_handle = iris_bo_export_gem_handle(bo);
1419
return 0;
1420
}
1421
1422
struct bo_export *export = calloc(1, sizeof(*export));
1423
if (!export)
1424
return -ENOMEM;
1425
1426
export->drm_fd = drm_fd;
1427
1428
int dmabuf_fd = -1;
1429
int err = iris_bo_export_dmabuf(bo, &dmabuf_fd);
1430
if (err) {
1431
free(export);
1432
return err;
1433
}
1434
1435
simple_mtx_lock(&bufmgr->lock);
1436
err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
1437
close(dmabuf_fd);
1438
if (err) {
1439
simple_mtx_unlock(&bufmgr->lock);
1440
free(export);
1441
return err;
1442
}
1443
1444
bool found = false;
1445
list_for_each_entry(struct bo_export, iter, &bo->exports, link) {
1446
if (iter->drm_fd != drm_fd)
1447
continue;
1448
/* Here we assume that for a given DRM fd, we'll always get back the
1449
* same GEM handle for a given buffer.
1450
*/
1451
assert(iter->gem_handle == export->gem_handle);
1452
free(export);
1453
export = iter;
1454
found = true;
1455
break;
1456
}
1457
if (!found)
1458
list_addtail(&export->link, &bo->exports);
1459
1460
simple_mtx_unlock(&bufmgr->lock);
1461
1462
*out_handle = export->gem_handle;
1463
1464
return 0;
1465
}
1466
1467
static void
1468
add_bucket(struct iris_bufmgr *bufmgr, int size, bool local)
1469
{
1470
unsigned int i = local ?
1471
bufmgr->num_local_buckets : bufmgr->num_buckets;
1472
1473
struct bo_cache_bucket *buckets = local ?
1474
bufmgr->local_cache_bucket : bufmgr->cache_bucket;
1475
1476
assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1477
1478
list_inithead(&buckets[i].head);
1479
buckets[i].size = size;
1480
1481
if (local)
1482
bufmgr->num_local_buckets++;
1483
else
1484
bufmgr->num_buckets++;
1485
1486
assert(bucket_for_size(bufmgr, size, local) == &buckets[i]);
1487
assert(bucket_for_size(bufmgr, size - 2048, local) == &buckets[i]);
1488
assert(bucket_for_size(bufmgr, size + 1, local) != &buckets[i]);
1489
}
1490
1491
static void
1492
init_cache_buckets(struct iris_bufmgr *bufmgr, bool local)
1493
{
1494
uint64_t size, cache_max_size = 64 * 1024 * 1024;
1495
1496
/* OK, so power of two buckets was too wasteful of memory.
1497
* Give 3 other sizes between each power of two, to hopefully
1498
* cover things accurately enough. (The alternative is
1499
* probably to just go for exact matching of sizes, and assume
1500
* that for things like composited window resize the tiled
1501
* width/height alignment and rounding of sizes to pages will
1502
* get us useful cache hit rates anyway)
1503
*/
1504
add_bucket(bufmgr, PAGE_SIZE, local);
1505
add_bucket(bufmgr, PAGE_SIZE * 2, local);
1506
add_bucket(bufmgr, PAGE_SIZE * 3, local);
1507
1508
/* Initialize the linked lists for BO reuse cache. */
1509
for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
1510
add_bucket(bufmgr, size, local);
1511
1512
add_bucket(bufmgr, size + size * 1 / 4, local);
1513
add_bucket(bufmgr, size + size * 2 / 4, local);
1514
add_bucket(bufmgr, size + size * 3 / 4, local);
1515
}
1516
}
1517
1518
uint32_t
1519
iris_create_hw_context(struct iris_bufmgr *bufmgr)
1520
{
1521
struct drm_i915_gem_context_create create = { };
1522
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1523
if (ret != 0) {
1524
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1525
return 0;
1526
}
1527
1528
/* Upon declaring a GPU hang, the kernel will zap the guilty context
1529
* back to the default logical HW state and attempt to continue on to
1530
* our next submitted batchbuffer. However, our render batches assume
1531
* the previous GPU state is preserved, and only emit commands needed
1532
* to incrementally change that state. In particular, we inherit the
1533
* STATE_BASE_ADDRESS and PIPELINE_SELECT settings, which are critical.
1534
* With default base addresses, our next batches will almost certainly
1535
* cause more GPU hangs, leading to repeated hangs until we're banned
1536
* or the machine is dead.
1537
*
1538
* Here we tell the kernel not to attempt to recover our context but
1539
* immediately (on the next batchbuffer submission) report that the
1540
* context is lost, and we will do the recovery ourselves. Ideally,
1541
* we'll have two lost batches instead of a continual stream of hangs.
1542
*/
1543
struct drm_i915_gem_context_param p = {
1544
.ctx_id = create.ctx_id,
1545
.param = I915_CONTEXT_PARAM_RECOVERABLE,
1546
.value = false,
1547
};
1548
intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
1549
1550
return create.ctx_id;
1551
}
1552
1553
static int
1554
iris_hw_context_get_priority(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1555
{
1556
struct drm_i915_gem_context_param p = {
1557
.ctx_id = ctx_id,
1558
.param = I915_CONTEXT_PARAM_PRIORITY,
1559
};
1560
intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
1561
return p.value; /* on error, return 0 i.e. default priority */
1562
}
1563
1564
int
1565
iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
1566
uint32_t ctx_id,
1567
int priority)
1568
{
1569
struct drm_i915_gem_context_param p = {
1570
.ctx_id = ctx_id,
1571
.param = I915_CONTEXT_PARAM_PRIORITY,
1572
.value = priority,
1573
};
1574
int err;
1575
1576
err = 0;
1577
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
1578
err = -errno;
1579
1580
return err;
1581
}
1582
1583
uint32_t
1584
iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1585
{
1586
uint32_t new_ctx = iris_create_hw_context(bufmgr);
1587
1588
if (new_ctx) {
1589
int priority = iris_hw_context_get_priority(bufmgr, ctx_id);
1590
iris_hw_context_set_priority(bufmgr, new_ctx, priority);
1591
}
1592
1593
return new_ctx;
1594
}
1595
1596
void
1597
iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1598
{
1599
struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
1600
1601
if (ctx_id != 0 &&
1602
intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1603
fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1604
strerror(errno));
1605
}
1606
}
1607
1608
int
1609
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1610
{
1611
struct drm_i915_reg_read reg_read = { .offset = offset };
1612
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1613
1614
*result = reg_read.val;
1615
return ret;
1616
}
1617
1618
static uint64_t
1619
iris_gtt_size(int fd)
1620
{
1621
/* We use the default (already allocated) context to determine
1622
* the default configuration of the virtual address space.
1623
*/
1624
struct drm_i915_gem_context_param p = {
1625
.param = I915_CONTEXT_PARAM_GTT_SIZE,
1626
};
1627
if (!intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
1628
return p.value;
1629
1630
return 0;
1631
}
1632
1633
static struct intel_buffer *
1634
intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
1635
{
1636
struct intel_buffer *buf = malloc(sizeof(struct intel_buffer));
1637
if (!buf)
1638
return NULL;
1639
1640
struct iris_bufmgr *bufmgr = (struct iris_bufmgr *)driver_ctx;
1641
1642
struct iris_bo *bo =
1643
iris_bo_alloc(bufmgr, "aux-map", size, 64 * 1024,
1644
IRIS_MEMZONE_OTHER, 0);
1645
1646
buf->driver_bo = bo;
1647
buf->gpu = bo->gtt_offset;
1648
buf->gpu_end = buf->gpu + bo->size;
1649
buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
1650
return buf;
1651
}
1652
1653
static void
1654
intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
1655
{
1656
iris_bo_unreference((struct iris_bo*)buffer->driver_bo);
1657
free(buffer);
1658
}
1659
1660
static struct intel_mapped_pinned_buffer_alloc aux_map_allocator = {
1661
.alloc = intel_aux_map_buffer_alloc,
1662
.free = intel_aux_map_buffer_free,
1663
};
1664
1665
static int
1666
gem_param(int fd, int name)
1667
{
1668
int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
1669
1670
struct drm_i915_getparam gp = { .param = name, .value = &v };
1671
if (intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
1672
return -1;
1673
1674
return v;
1675
}
1676
1677
static bool
1678
iris_bufmgr_query_meminfo(struct iris_bufmgr *bufmgr)
1679
{
1680
struct drm_i915_query_memory_regions *meminfo =
1681
intel_i915_query_alloc(bufmgr->fd, DRM_I915_QUERY_MEMORY_REGIONS);
1682
if (meminfo == NULL)
1683
return false;
1684
1685
for (int i = 0; i < meminfo->num_regions; i++) {
1686
const struct drm_i915_memory_region_info *mem = &meminfo->regions[i];
1687
switch (mem->region.memory_class) {
1688
case I915_MEMORY_CLASS_SYSTEM:
1689
bufmgr->sys.region = mem->region;
1690
bufmgr->sys.size = mem->probed_size;
1691
break;
1692
case I915_MEMORY_CLASS_DEVICE:
1693
bufmgr->vram.region = mem->region;
1694
bufmgr->vram.size = mem->probed_size;
1695
break;
1696
default:
1697
break;
1698
}
1699
}
1700
1701
free(meminfo);
1702
1703
return true;
1704
}
1705
1706
/**
1707
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1708
* and manage buffer objects.
1709
*
1710
* \param fd File descriptor of the opened DRM device.
1711
*/
1712
static struct iris_bufmgr *
1713
iris_bufmgr_create(struct intel_device_info *devinfo, int fd, bool bo_reuse)
1714
{
1715
uint64_t gtt_size = iris_gtt_size(fd);
1716
if (gtt_size <= IRIS_MEMZONE_OTHER_START)
1717
return NULL;
1718
1719
struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
1720
if (bufmgr == NULL)
1721
return NULL;
1722
1723
/* Handles to buffer objects belong to the device fd and are not
1724
* reference counted by the kernel. If the same fd is used by
1725
* multiple parties (threads sharing the same screen bufmgr, or
1726
* even worse the same device fd passed to multiple libraries)
1727
* ownership of those handles is shared by those independent parties.
1728
*
1729
* Don't do this! Ensure that each library/bufmgr has its own device
1730
* fd so that its namespace does not clash with another.
1731
*/
1732
bufmgr->fd = os_dupfd_cloexec(fd);
1733
1734
p_atomic_set(&bufmgr->refcount, 1);
1735
1736
simple_mtx_init(&bufmgr->lock, mtx_plain);
1737
1738
list_inithead(&bufmgr->zombie_list);
1739
1740
bufmgr->has_llc = devinfo->has_llc;
1741
bufmgr->has_tiling_uapi = devinfo->has_tiling_uapi;
1742
bufmgr->bo_reuse = bo_reuse;
1743
bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;
1744
iris_bufmgr_query_meminfo(bufmgr);
1745
1746
STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
1747
const uint64_t _4GB = 1ull << 32;
1748
const uint64_t _2GB = 1ul << 31;
1749
1750
/* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
1751
const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;
1752
1753
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
1754
PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
1755
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_BINDLESS],
1756
IRIS_MEMZONE_BINDLESS_START, IRIS_BINDLESS_SIZE);
1757
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
1758
IRIS_MEMZONE_SURFACE_START,
1759
_4GB_minus_1 - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE -
1760
IRIS_BINDLESS_SIZE);
1761
/* TODO: Why does limiting to 2GB help some state items on gfx12?
1762
* - CC Viewport Pointer
1763
* - Blend State Pointer
1764
* - Color Calc State Pointer
1765
*/
1766
const uint64_t dynamic_pool_size =
1767
(devinfo->ver >= 12 ? _2GB : _4GB_minus_1) - IRIS_BORDER_COLOR_POOL_SIZE;
1768
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
1769
IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
1770
dynamic_pool_size);
1771
1772
/* Leave the last 4GB out of the high vma range, so that no state
1773
* base address + size can overflow 48 bits.
1774
*/
1775
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
1776
IRIS_MEMZONE_OTHER_START,
1777
(gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);
1778
1779
init_cache_buckets(bufmgr, false);
1780
init_cache_buckets(bufmgr, true);
1781
1782
bufmgr->name_table =
1783
_mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
1784
bufmgr->handle_table =
1785
_mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
1786
1787
bufmgr->vma_min_align = devinfo->has_local_mem ? 64 * 1024 : PAGE_SIZE;
1788
1789
if (devinfo->has_aux_map) {
1790
bufmgr->aux_map_ctx = intel_aux_map_init(bufmgr, &aux_map_allocator,
1791
devinfo);
1792
assert(bufmgr->aux_map_ctx);
1793
}
1794
1795
return bufmgr;
1796
}
1797
1798
static struct iris_bufmgr *
1799
iris_bufmgr_ref(struct iris_bufmgr *bufmgr)
1800
{
1801
p_atomic_inc(&bufmgr->refcount);
1802
return bufmgr;
1803
}
1804
1805
void
1806
iris_bufmgr_unref(struct iris_bufmgr *bufmgr)
1807
{
1808
simple_mtx_lock(&global_bufmgr_list_mutex);
1809
if (p_atomic_dec_zero(&bufmgr->refcount)) {
1810
list_del(&bufmgr->link);
1811
iris_bufmgr_destroy(bufmgr);
1812
}
1813
simple_mtx_unlock(&global_bufmgr_list_mutex);
1814
}
1815
1816
/**
1817
* Gets an already existing GEM buffer manager or create a new one.
1818
*
1819
* \param fd File descriptor of the opened DRM device.
1820
*/
1821
struct iris_bufmgr *
1822
iris_bufmgr_get_for_fd(struct intel_device_info *devinfo, int fd, bool bo_reuse)
1823
{
1824
struct stat st;
1825
1826
if (fstat(fd, &st))
1827
return NULL;
1828
1829
struct iris_bufmgr *bufmgr = NULL;
1830
1831
simple_mtx_lock(&global_bufmgr_list_mutex);
1832
list_for_each_entry(struct iris_bufmgr, iter_bufmgr, &global_bufmgr_list, link) {
1833
struct stat iter_st;
1834
if (fstat(iter_bufmgr->fd, &iter_st))
1835
continue;
1836
1837
if (st.st_rdev == iter_st.st_rdev) {
1838
assert(iter_bufmgr->bo_reuse == bo_reuse);
1839
bufmgr = iris_bufmgr_ref(iter_bufmgr);
1840
goto unlock;
1841
}
1842
}
1843
1844
bufmgr = iris_bufmgr_create(devinfo, fd, bo_reuse);
1845
if (bufmgr)
1846
list_addtail(&bufmgr->link, &global_bufmgr_list);
1847
1848
unlock:
1849
simple_mtx_unlock(&global_bufmgr_list_mutex);
1850
1851
return bufmgr;
1852
}
1853
1854
int
1855
iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr)
1856
{
1857
return bufmgr->fd;
1858
}
1859
1860
void*
1861
iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr)
1862
{
1863
return bufmgr->aux_map_ctx;
1864
}
1865
1866