GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/anv_gem.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/intel_defines.h"
#include "common/intel_gem.h"
#include "drm-uapi/sync_file.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}
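
/* Usage sketch (hypothetical caller): anv_gem_create() and anv_gem_close()
 * are used as a pair, and a zero handle always means failure, since GEM
 * handles are never 0:
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    if (handle == 0)
 *       return;                     // allocation failed
 *    ...
 *    anv_gem_close(device, handle);
 */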

uint32_t
anv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
                       uint32_t num_regions,
                       struct drm_i915_gem_memory_class_instance *regions)
{
   struct drm_i915_gem_create_ext_memory_regions ext_regions = {
      .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
      .num_regions = num_regions,
      .regions = (uintptr_t)regions,
   };

   struct drm_i915_gem_create_ext gem_create = {
      .size = anv_bo_size,
      .extensions = (uintptr_t) &ext_regions,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE_EXT,
                         &gem_create);
   if (ret != 0) {
      return 0;
   }

   return gem_create.handle;
}
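
/* Usage sketch (hypothetical caller, assuming the kernel reports both memory
 * classes): the region list is passed in order of preference, so a BO that
 * prefers device-local memory but may fall back to system memory would be
 * created with:
 *
 *    struct drm_i915_gem_memory_class_instance regions[2] = {
 *       { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *       { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *    };
 *    uint32_t handle = anv_gem_create_regions(device, size, 2, regions);
 */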

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = (flags & I915_MMAP_WC) ?
               I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}

static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   void *map;
   if (device->physical->has_mmap_offset)
      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 0, 1, or negative to indicate error
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}
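
/* Usage sketch (hypothetical caller): the wait takes a timeout and writes the
 * remaining budget back through the same pointer, with ETIME indicating the
 * BO was still busy when the timeout expired:
 *
 *    int64_t timeout_ns = 1000000000;   // 1 second budget
 *    int ret = anv_gem_wait(device, handle, &timeout_ns);
 *    if (ret == -1 && errno == ETIME) {
 *       // still busy; timeout_ns holds whatever budget remains
 *    }
 */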

int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Return -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}
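
/* Usage sketch (hypothetical caller): because failure is folded into a 0
 * return, this helper is only suitable for parameters where 0 is a safe
 * "not supported" default, e.g. boolean feature checks:
 *
 *    bool has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC) != 0;
 */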

uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
   struct drm_get_cap cap = {
      .capability = capability,
   };

   intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
   return cap.value;
}

bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}
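
/* Usage sketch (hypothetical caller): the probe allocates and tiles a
 * throwaway 4 KiB BO, so callers typically run it once per tiling mode at
 * device-init time and cache the result:
 *
 *    bool x_tiled_swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
 */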

bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     INTEL_CONTEXT_MEDIUM_PRIORITY);
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_create_context_engines(struct anv_device *device,
                               const struct drm_i915_query_engine_info *info,
                               int num_engines, uint16_t *engine_classes)
{
   const size_t engine_inst_sz = 2 * sizeof(__u16); /* 1 class, 1 instance */
   const size_t engines_param_size =
      sizeof(__u64) /* extensions */ + num_engines * engine_inst_sz;

   void *engines_param = malloc(engines_param_size);
   assert(engines_param);
   *(__u64*)engines_param = 0;
   __u16 *class_inst_ptr = (__u16*)(((__u64*)engines_param) + 1);

   /* For each type of drm_i915_gem_engine_class of interest, we keep track of
    * the previous engine instance used.
    */
   int last_engine_idx[] = {
      [I915_ENGINE_CLASS_RENDER] = -1,
   };

   int i915_engine_counts[] = {
      [I915_ENGINE_CLASS_RENDER] =
         anv_gem_count_engines(info, I915_ENGINE_CLASS_RENDER),
   };

   /* For each queue, we look for the next instance that matches the class we
    * need.
    */
   for (int i = 0; i < num_engines; i++) {
      uint16_t engine_class = engine_classes[i];
      if (i915_engine_counts[engine_class] <= 0) {
         free(engines_param);
         return -1;
      }

      /* Run through the engines reported by the kernel looking for the next
       * matching instance. We loop in case we want to create multiple
       * contexts on an engine instance.
       */
      int engine_instance = -1;
      for (int i = 0; i < info->num_engines; i++) {
         int *idx = &last_engine_idx[engine_class];
         if (++(*idx) >= info->num_engines)
            *idx = 0;
         if (info->engines[*idx].engine.engine_class == engine_class) {
            engine_instance = info->engines[*idx].engine.engine_instance;
            break;
         }
      }
      if (engine_instance < 0) {
         free(engines_param);
         return -1;
      }

      *class_inst_ptr++ = engine_class;
      *class_inst_ptr++ = engine_instance;
   }

   assert((uintptr_t)engines_param + engines_param_size ==
          (uintptr_t)class_inst_ptr);

   struct drm_i915_gem_context_create_ext_setparam set_engines = {
      .base = {
         .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
      },
      .param = {
         .param = I915_CONTEXT_PARAM_ENGINES,
         .value = (uintptr_t)engines_param,
         .size = engines_param_size,
      }
   };
   struct drm_i915_gem_context_create_ext create = {
      .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
      .extensions = (uintptr_t)&set_engines,
   };
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
   free(engines_param);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}
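
/* The engines_param blob built above matches the layout of
 * struct i915_context_param_engines: one __u64 extensions field (zeroed here)
 * followed by num_engines packed { class, instance } __u16 pairs.
 *
 * Usage sketch (hypothetical caller): two queues on the render class, each
 * mapped onto whichever render engine instances the kernel reports:
 *
 *    uint16_t classes[2] = { I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_RENDER };
 *    int ctx_id = anv_gem_create_context_engines(device, info, 2, classes);
 */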

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}

int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

int
anv_gem_context_get_reset_stats(int fd, int context,
                                uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = context,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | DRM_RDWR,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}

int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   struct sync_merge_data args = {
      .name = "anv merge fence",
      .fd2 = fd2,
      .fence = -1,
   };

   int ret = intel_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}
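
/* Usage sketch (hypothetical caller): SYNC_IOC_MERGE is issued on fd1 and
 * returns a new sync_file fd that signals once both inputs have signaled;
 * the input fds are not consumed and remain the caller's to close:
 *
 *    int merged = anv_gem_sync_file_merge(device, fence_fd_a, fence_fd_b);
 *    if (merged >= 0) {
 *       close(fence_fd_a);
 *       close(fence_fd_b);
 *    }
 */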

uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   if (ret)
      return 0;

   return args.handle;
}

void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .fd = fd,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}

void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_array args = {
      .handles = (uint64_t)(uintptr_t)&handle,
      .count_handles = 1,
   };

   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

bool
anv_gem_supports_syncobj_wait(int fd)
{
   return intel_gem_supports_syncobj_wait(fd);
}

int
anv_gem_syncobj_wait(struct anv_device *device,
                     const uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   struct drm_syncobj_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = num_handles,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

int
anv_gem_syncobj_timeline_wait(struct anv_device *device,
                              const uint32_t *handles, const uint64_t *points,
                              uint32_t num_items, int64_t abs_timeout_ns,
                              bool wait_all, bool wait_materialize)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
   if (wait_materialize)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
}
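
/* Usage sketch (hypothetical caller): as the parameter name abs_timeout_ns
 * suggests, the timeout is an absolute deadline rather than a relative
 * duration. Waiting for point 42 on a single timeline syncobj might look
 * like:
 *
 *    uint32_t handle = timeline_syncobj;          // hypothetical handle
 *    uint64_t point = 42;
 *    int ret = anv_gem_syncobj_timeline_wait(device, &handle, &point, 1,
 *                                            deadline_ns, true, false);
 */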

int
anv_gem_syncobj_timeline_signal(struct anv_device *device,
                                const uint32_t *handles, const uint64_t *points,
                                uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}

int
anv_gem_syncobj_timeline_query(struct anv_device *device,
                               const uint32_t *handles, uint64_t *points,
                               uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}

struct drm_i915_query_engine_info *
anv_gem_get_engine_info(int fd)
{
   return intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO);
}

int
anv_gem_count_engines(const struct drm_i915_query_engine_info *info,
                      uint16_t engine_class)
{
   int count = 0;
   for (int i = 0; i < info->num_engines; i++) {
      if (info->engines[i].engine.engine_class == engine_class)
         count++;
   }
   return count;
}
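
/* Usage sketch (hypothetical caller): the engine query result is
 * heap-allocated, so the caller is expected to free() it once the counts
 * have been read:
 *
 *    struct drm_i915_query_engine_info *info = anv_gem_get_engine_info(fd);
 *    if (info != NULL) {
 *       int n_render = anv_gem_count_engines(info, I915_ENGINE_CLASS_RENDER);
 *       free(info);
 *    }
 */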