GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_fence.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/intel_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"
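/**
 * Thin wrapper around DRM_IOCTL_SYNCOBJ_CREATE: create a DRM sync object
 * and return its handle.  \p flags may include DRM_SYNCOBJ_CREATE_SIGNALED
 * to create it in the already-signalled state.
 */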
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

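/**
 * Thin wrapper around DRM_IOCTL_SYNCOBJ_DESTROY: destroy a sync object
 * by handle.
 */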
static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

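/**
 * Destroy the kernel sync object and free the CPU-side wrapper.  Normally
 * reached via iris_syncobj_reference() once the reference count drops to
 * zero.
 */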
void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}

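/* Illustrative sketch (not actual driver code): to make one batch wait on
 * a sync-point that another batch will signal, a caller could do
 *
 *    struct iris_syncobj *syncobj = iris_create_syncobj(screen);
 *    iris_batch_add_syncobj(signalling_batch, syncobj, I915_EXEC_FENCE_SIGNAL);
 *    iris_batch_add_syncobj(waiting_batch, syncobj, I915_EXEC_FENCE_WAIT);
 *    iris_syncobj_reference(screen, &syncobj, NULL);
 *
 * where signalling_batch and waiting_batch are hypothetical iris_batch
 * pointers; the exec_fences/syncobjs arrays filled in above are consumed
 * when each batch is submitted.
 */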
/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * The compute batch, for instance, may go unused for long stretches and
 * accumulate references to stale render batches that are no longer of
 * interest, so we can free those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 1; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(&screen->base, *syncobj, 0))
         continue;

      /* This sync object has already passed, so there's no need to keep
       * marking it as a dependency; we can stop holding on to the reference.
       */
      iris_syncobj_reference(screen, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

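/**
 * The iris implementation of a Gallium fence: a reference-counted set of
 * fine-grained fences, at most one per batch.  unflushed_ctx is non-NULL
 * while the fence was created with PIPE_FLUSH_DEFERRED and has not been
 * flushed yet.
 */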
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

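/**
 * Drop all fine-fence references held by the fence and free it.  Reached
 * from iris_fence_reference() when the last reference goes away.
 */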
static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

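/**
 * pipe_screen::fence_reference implementation: point *dst at src,
 * destroying the previously referenced fence if its refcount hits zero.
 */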
static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

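/**
 * Wait on a single syncobj via DRM_IOCTL_SYNCOBJ_WAIT.
 *
 * The boolean return is simply the ioctl result: false means the syncobj
 * signalled within \p timeout_nsec (or was NULL), true means the wait
 * timed out or failed.  Passing a zero timeout therefore polls, which is
 * how clear_stale_syncobjs() uses it.
 */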
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER CSI "0;97;44m"
#define NORMAL CSI "0m"

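/**
 * pipe_context::flush implementation.
 *
 * Flushes the context's batches (unless the flush is deferred) and, if the
 * caller asked for one, builds a pipe_fence_handle out of each batch's
 * fine-grained fence.
 */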
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes. Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      iris_measure_frame_end(ice);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch). Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

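/**
 * pipe_context::fence_server_sync implementation: make any subsequently
 * submitted work in this context wait until \p fence has passed before it
 * executes.
 */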
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    * bound to another thread, and poking at its internals wouldn't
    * be safe. In the future we should use MI_SEMAPHORE_WAIT and
    * block until the other job has been submitted, relying on
    * kernel timeslicing to preempt us until the other job is
    * actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_batch *batch = &ice->batches[b];

         /* We're going to make any future work in this batch wait for our
          * fence to have gone by. But any currently queued work doesn't
          * need to wait. Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

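/**
 * Current CLOCK_MONOTONIC time in nanoseconds.
 */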
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

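/**
 * Convert a relative timeout in nanoseconds into the absolute
 * CLOCK_MONOTONIC deadline that DRM_IOCTL_SYNCOBJ_WAIT expects, clamping
 * so the addition cannot overflow INT64_MAX.  A zero timeout is passed
 * through unchanged, which makes the wait a simple poll.
 */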
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

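/**
 * pipe_screen::fence_finish implementation: block on the CPU until the
 * fence passes, or until \p timeout (relative, in nanoseconds) expires.
 * Returns true if every fine fence signalled in time.
 */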
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   ctx = threaded_context_unwrap_sync(ctx);

   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet. Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with. It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context. We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

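/**
 * Merge two sync-file fds with SYNC_IOC_MERGE, returning a new fd that
 * signals once both inputs have signalled.  Both input fds are closed; if
 * either is -1, the other is returned as-is.
 */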
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

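/**
 * pipe_screen::fence_get_fd implementation: export the fence as a
 * sync-file fd covering every fine fence that has not yet signalled.
 */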
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded. This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them. But we're being asked to
       * export such a fence. So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

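/**
 * pipe_context::create_fence_fd implementation: wrap an imported sync-file
 * or syncobj fd in a pipe_fence_handle.  On failure, *out is set to NULL.
 */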
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC || type == PIPE_FD_TYPE_SYNCOBJ);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   if (type == PIPE_FD_TYPE_NATIVE_SYNC) {
      args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
   }

   if (intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      if (type == PIPE_FD_TYPE_NATIVE_SYNC)
         gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence. So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

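/**
 * pipe_context::fence_server_signal implementation: make every batch in
 * this context signal the fence's remaining syncobjs when it executes.
 */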
static void
iris_fence_signal(struct pipe_context *ctx,
                  struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (ctx == fence->unflushed_ctx)
      return;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         /* Skip fences that have already signaled. */
         if (iris_fine_fence_signaled(fine))
            continue;

         ice->batches[b].contains_fence_signal = true;
         iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,
                                I915_EXEC_FENCE_SIGNAL);
      }
   }
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
   ctx->fence_server_signal = iris_fence_signal;
}