Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/loader/loader_dri3_helper.c
4550 views
1
/*
2
* Copyright © 2013 Keith Packard
3
* Copyright © 2015 Boyan Ding
4
*
5
* Permission to use, copy, modify, distribute, and sell this software and its
6
* documentation for any purpose is hereby granted without fee, provided that
7
* the above copyright notice appear in all copies and that both that copyright
8
* notice and this permission notice appear in supporting documentation, and
9
* that the name of the copyright holders not be used in advertising or
10
* publicity pertaining to distribution of the software without specific,
11
* written prior permission. The copyright holders make no representations
12
* about the suitability of this software for any purpose. It is provided "as
13
* is" without express or implied warranty.
14
*
15
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21
* OF THIS SOFTWARE.
22
*/
23
24
#include <fcntl.h>
25
#include <stdlib.h>
26
#include <unistd.h>
27
#include <string.h>
28
29
#include <X11/xshmfence.h>
30
#include <xcb/xcb.h>
31
#include <xcb/dri3.h>
32
#include <xcb/present.h>
33
#include <xcb/xfixes.h>
34
35
#include <X11/Xlib-xcb.h>
36
37
#include "loader_dri_helper.h"
38
#include "loader_dri3_helper.h"
39
#include "util/macros.h"
40
#include "drm-uapi/drm_fourcc.h"
41
42
/* Mirrored from driconf.h; these values are exposed to users through the
 * "vblank_mode" driconf option and must therefore stay stable.
 */
#define DRI_CONF_VBLANK_NEVER 0
#define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
#define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
#define DRI_CONF_VBLANK_ALWAYS_SYNC 3
48
/**
49
* A cached blit context.
50
*/
51
struct loader_dri3_blit_context {
52
mtx_t mtx;
53
__DRIcontext *ctx;
54
__DRIscreen *cur_screen;
55
const __DRIcoreExtension *core;
56
};
57
58
/* For simplicity we maintain the cache only for a single screen at a time */
59
static struct loader_dri3_blit_context blit_context = {
60
_MTX_INITIALIZER_NP, NULL
61
};
62
63
static void
64
dri3_flush_present_events(struct loader_dri3_drawable *draw);
65
66
static struct loader_dri3_buffer *
67
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
68
69
static xcb_screen_t *
70
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
71
{
72
xcb_screen_iterator_t screen_iter =
73
xcb_setup_roots_iterator(xcb_get_setup(conn));
74
75
for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
76
if (screen_iter.data->root == root)
77
return screen_iter.data;
78
}
79
80
return NULL;
81
}
82
83
static xcb_visualtype_t *
84
get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
85
{
86
xcb_visualtype_iterator_t visual_iter;
87
xcb_screen_t *screen = draw->screen;
88
xcb_depth_iterator_t depth_iter;
89
90
if (!screen)
91
return NULL;
92
93
depth_iter = xcb_screen_allowed_depths_iterator(screen);
94
for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
95
if (depth_iter.data->depth != depth)
96
continue;
97
98
visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
99
if (visual_iter.rem)
100
return visual_iter.data;
101
}
102
103
return NULL;
104
}
105
106
/* Sets the adaptive sync window property state. */
107
static void
108
set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
109
uint32_t state)
110
{
111
static char const name[] = "_VARIABLE_REFRESH";
112
xcb_intern_atom_cookie_t cookie;
113
xcb_intern_atom_reply_t* reply;
114
xcb_void_cookie_t check;
115
116
cookie = xcb_intern_atom(conn, 0, strlen(name), name);
117
reply = xcb_intern_atom_reply(conn, cookie, NULL);
118
if (reply == NULL)
119
return;
120
121
if (state)
122
check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
123
drawable, reply->atom,
124
XCB_ATOM_CARDINAL, 32, 1, &state);
125
else
126
check = xcb_delete_property_checked(conn, drawable, reply->atom);
127
128
xcb_discard_reply(conn, check.sequence);
129
free(reply);
130
}
131
132
/* Get red channel mask for given drawable at given depth. */
133
static unsigned int
134
dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
135
{
136
xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
137
138
if (visual)
139
return visual->red_mask;
140
141
return 0;
142
}
143
144
/**
145
* Do we have blit functionality in the image blit extension?
146
*
147
* \param draw[in] The drawable intended to blit from / to.
148
* \return true if we have blit functionality. false otherwise.
149
*/
150
static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
151
{
152
return draw->ext->image->base.version >= 9 &&
153
draw->ext->image->blitImage != NULL;
154
}
155
156
/**
157
* Get and lock (for use with the current thread) a dri context associated
158
* with the drawable's dri screen. The context is intended to be used with
159
* the dri image extension's blitImage method.
160
*
161
* \param draw[in] Pointer to the drawable whose dri screen we want a
162
* dri context for.
163
* \return A dri context or NULL if context creation failed.
164
*
165
* When the caller is done with the context (even if the context returned was
166
* NULL), the caller must call loader_dri3_blit_context_put.
167
*/
168
static __DRIcontext *
169
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
170
{
171
mtx_lock(&blit_context.mtx);
172
173
if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
174
blit_context.core->destroyContext(blit_context.ctx);
175
blit_context.ctx = NULL;
176
}
177
178
if (!blit_context.ctx) {
179
blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
180
NULL, NULL, NULL);
181
blit_context.cur_screen = draw->dri_screen;
182
blit_context.core = draw->ext->core;
183
}
184
185
return blit_context.ctx;
186
}
187
188
/**
189
* Release (for use with other threads) a dri context previously obtained using
190
* loader_dri3_blit_context_get.
191
*/
192
static void
193
loader_dri3_blit_context_put(void)
194
{
195
mtx_unlock(&blit_context.mtx);
196
}
197
198
/**
199
* Blit (parts of) the contents of a DRI image to another dri image
200
*
201
* \param draw[in] The drawable which owns the images.
202
* \param dst[in] The destination image.
203
* \param src[in] The source image.
204
* \param dstx0[in] Start destination coordinate.
205
* \param dsty0[in] Start destination coordinate.
206
* \param width[in] Blit width.
207
* \param height[in] Blit height.
208
* \param srcx0[in] Start source coordinate.
209
* \param srcy0[in] Start source coordinate.
210
* \param flush_flag[in] Image blit flush flag.
211
* \return true iff successful.
212
*/
213
static bool
214
loader_dri3_blit_image(struct loader_dri3_drawable *draw,
215
__DRIimage *dst, __DRIimage *src,
216
int dstx0, int dsty0, int width, int height,
217
int srcx0, int srcy0, int flush_flag)
218
{
219
__DRIcontext *dri_context;
220
bool use_blit_context = false;
221
222
if (!loader_dri3_have_image_blit(draw))
223
return false;
224
225
dri_context = draw->vtable->get_dri_context(draw);
226
227
if (!dri_context || !draw->vtable->in_current_context(draw)) {
228
dri_context = loader_dri3_blit_context_get(draw);
229
use_blit_context = true;
230
flush_flag |= __BLIT_FLAG_FLUSH;
231
}
232
233
if (dri_context)
234
draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
235
width, height, srcx0, srcy0,
236
width, height, flush_flag);
237
238
if (use_blit_context)
239
loader_dri3_blit_context_put();
240
241
return dri_context != NULL;
242
}
243
244
static inline void
245
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
246
{
247
xshmfence_reset(buffer->shm_fence);
248
}
249
250
static inline void
251
dri3_fence_set(struct loader_dri3_buffer *buffer)
252
{
253
xshmfence_trigger(buffer->shm_fence);
254
}
255
256
static inline void
257
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
258
{
259
xcb_sync_trigger_fence(c, buffer->sync_fence);
260
}
261
262
static inline void
263
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
264
struct loader_dri3_buffer *buffer)
265
{
266
xcb_flush(c);
267
xshmfence_await(buffer->shm_fence);
268
if (draw) {
269
mtx_lock(&draw->mtx);
270
dri3_flush_present_events(draw);
271
mtx_unlock(&draw->mtx);
272
}
273
}
274
275
static void
276
dri3_update_max_num_back(struct loader_dri3_drawable *draw)
277
{
278
switch (draw->last_present_mode) {
279
case XCB_PRESENT_COMPLETE_MODE_FLIP: {
280
int new_max;
281
282
if (draw->swap_interval == 0)
283
new_max = 4;
284
else
285
new_max = 3;
286
287
assert(new_max <= LOADER_DRI3_MAX_BACK);
288
289
if (new_max != draw->max_num_back) {
290
/* On transition from swap interval == 0 to != 0, start with two
291
* buffers again. Otherwise keep the current number of buffers. Either
292
* way, more will be allocated if needed.
293
*/
294
if (new_max < draw->max_num_back)
295
draw->cur_num_back = 2;
296
297
draw->max_num_back = new_max;
298
}
299
300
break;
301
}
302
303
case XCB_PRESENT_COMPLETE_MODE_SKIP:
304
break;
305
306
default:
307
/* On transition from flips to copies, start with a single buffer again,
308
* a second one will be allocated if needed
309
*/
310
if (draw->max_num_back != 2)
311
draw->cur_num_back = 1;
312
313
draw->max_num_back = 2;
314
}
315
}
316
317
void
318
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
319
{
320
draw->swap_interval = interval;
321
}
322
323
/** dri3_free_render_buffer
324
*
325
* Free everything associated with one render buffer including pixmap, fence
326
* stuff and the driver image
327
*/
328
static void
329
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
330
struct loader_dri3_buffer *buffer)
331
{
332
if (buffer->own_pixmap)
333
xcb_free_pixmap(draw->conn, buffer->pixmap);
334
xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
335
xshmfence_unmap_shm(buffer->shm_fence);
336
draw->ext->image->destroyImage(buffer->image);
337
if (buffer->linear_buffer)
338
draw->ext->image->destroyImage(buffer->linear_buffer);
339
free(buffer);
340
}
341
342
void
343
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
344
{
345
int i;
346
347
draw->ext->core->destroyDrawable(draw->dri_drawable);
348
349
for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
350
if (draw->buffers[i])
351
dri3_free_render_buffer(draw, draw->buffers[i]);
352
}
353
354
if (draw->special_event) {
355
xcb_void_cookie_t cookie =
356
xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
357
XCB_PRESENT_EVENT_MASK_NO_EVENT);
358
359
xcb_discard_reply(draw->conn, cookie.sequence);
360
xcb_unregister_for_special_event(draw->conn, draw->special_event);
361
}
362
363
if (draw->region)
364
xcb_xfixes_destroy_region(draw->conn, draw->region);
365
366
cnd_destroy(&draw->event_cnd);
367
mtx_destroy(&draw->mtx);
368
}
369
370
int
371
loader_dri3_drawable_init(xcb_connection_t *conn,
372
xcb_drawable_t drawable,
373
__DRIscreen *dri_screen,
374
bool is_different_gpu,
375
bool multiplanes_available,
376
const __DRIconfig *dri_config,
377
struct loader_dri3_extensions *ext,
378
const struct loader_dri3_vtable *vtable,
379
struct loader_dri3_drawable *draw)
380
{
381
xcb_get_geometry_cookie_t cookie;
382
xcb_get_geometry_reply_t *reply;
383
xcb_generic_error_t *error;
384
GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
385
int swap_interval;
386
387
draw->conn = conn;
388
draw->ext = ext;
389
draw->vtable = vtable;
390
draw->drawable = drawable;
391
draw->region = 0;
392
draw->dri_screen = dri_screen;
393
draw->is_different_gpu = is_different_gpu;
394
draw->multiplanes_available = multiplanes_available;
395
396
draw->have_back = 0;
397
draw->have_fake_front = 0;
398
draw->first_init = true;
399
draw->adaptive_sync = false;
400
draw->adaptive_sync_active = false;
401
402
draw->cur_blit_source = -1;
403
draw->back_format = __DRI_IMAGE_FORMAT_NONE;
404
mtx_init(&draw->mtx, mtx_plain);
405
cnd_init(&draw->event_cnd);
406
407
if (draw->ext->config) {
408
unsigned char adaptive_sync = 0;
409
410
draw->ext->config->configQueryi(draw->dri_screen,
411
"vblank_mode", &vblank_mode);
412
413
draw->ext->config->configQueryb(draw->dri_screen,
414
"adaptive_sync",
415
&adaptive_sync);
416
417
draw->adaptive_sync = adaptive_sync;
418
}
419
420
if (!draw->adaptive_sync)
421
set_adaptive_sync_property(conn, draw->drawable, false);
422
423
switch (vblank_mode) {
424
case DRI_CONF_VBLANK_NEVER:
425
case DRI_CONF_VBLANK_DEF_INTERVAL_0:
426
swap_interval = 0;
427
break;
428
case DRI_CONF_VBLANK_DEF_INTERVAL_1:
429
case DRI_CONF_VBLANK_ALWAYS_SYNC:
430
default:
431
swap_interval = 1;
432
break;
433
}
434
draw->swap_interval = swap_interval;
435
436
dri3_update_max_num_back(draw);
437
438
/* Create a new drawable */
439
draw->dri_drawable =
440
draw->ext->image_driver->createNewDrawable(dri_screen,
441
dri_config,
442
draw);
443
444
if (!draw->dri_drawable)
445
return 1;
446
447
cookie = xcb_get_geometry(draw->conn, draw->drawable);
448
reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
449
if (reply == NULL || error != NULL) {
450
draw->ext->core->destroyDrawable(draw->dri_drawable);
451
return 1;
452
}
453
454
draw->screen = get_screen_for_root(draw->conn, reply->root);
455
draw->width = reply->width;
456
draw->height = reply->height;
457
draw->depth = reply->depth;
458
draw->vtable->set_drawable_size(draw, draw->width, draw->height);
459
free(reply);
460
461
draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
462
if (draw->ext->core->base.version >= 2) {
463
(void )draw->ext->core->getConfigAttrib(dri_config,
464
__DRI_ATTRIB_SWAP_METHOD,
465
&draw->swap_method);
466
}
467
468
/*
469
* Make sure server has the same swap interval we do for the new
470
* drawable.
471
*/
472
loader_dri3_set_swap_interval(draw, swap_interval);
473
474
return 0;
475
}
476
477
/*
478
* Process one Present event
479
*/
480
static void
481
dri3_handle_present_event(struct loader_dri3_drawable *draw,
482
xcb_present_generic_event_t *ge)
483
{
484
switch (ge->evtype) {
485
case XCB_PRESENT_CONFIGURE_NOTIFY: {
486
xcb_present_configure_notify_event_t *ce = (void *) ge;
487
488
draw->width = ce->width;
489
draw->height = ce->height;
490
draw->vtable->set_drawable_size(draw, draw->width, draw->height);
491
draw->ext->flush->invalidate(draw->dri_drawable);
492
break;
493
}
494
case XCB_PRESENT_COMPLETE_NOTIFY: {
495
xcb_present_complete_notify_event_t *ce = (void *) ge;
496
497
/* Compute the processed SBC number from the received 32-bit serial number
498
* merged with the upper 32-bits of the sent 64-bit serial number while
499
* checking for wrap.
500
*/
501
if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
502
uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
503
504
/* Only assume wraparound if that results in exactly the previous
505
* SBC + 1, otherwise ignore received SBC > sent SBC (those are
506
* probably from a previous loader_dri3_drawable instance) to avoid
507
* calculating bogus target MSC values in loader_dri3_swap_buffers_msc
508
*/
509
if (recv_sbc <= draw->send_sbc)
510
draw->recv_sbc = recv_sbc;
511
else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
512
draw->recv_sbc = recv_sbc - 0x100000000ULL;
513
514
/* When moving from flip to copy, we assume that we can allocate in
515
* a more optimal way if we don't need to cater for the display
516
* controller.
517
*/
518
if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
519
draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
520
for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
521
if (draw->buffers[b])
522
draw->buffers[b]->reallocate = true;
523
}
524
}
525
526
/* If the server tells us that our allocation is suboptimal, we
527
* reallocate once.
528
*/
529
#ifdef HAVE_DRI3_MODIFIERS
530
if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
531
draw->last_present_mode != ce->mode) {
532
for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
533
if (draw->buffers[b])
534
draw->buffers[b]->reallocate = true;
535
}
536
}
537
#endif
538
draw->last_present_mode = ce->mode;
539
540
if (draw->vtable->show_fps)
541
draw->vtable->show_fps(draw, ce->ust);
542
543
draw->ust = ce->ust;
544
draw->msc = ce->msc;
545
} else if (ce->serial == draw->eid) {
546
draw->notify_ust = ce->ust;
547
draw->notify_msc = ce->msc;
548
}
549
break;
550
}
551
case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
552
xcb_present_idle_notify_event_t *ie = (void *) ge;
553
int b;
554
555
for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
556
struct loader_dri3_buffer *buf = draw->buffers[b];
557
558
if (buf && buf->pixmap == ie->pixmap)
559
buf->busy = 0;
560
}
561
break;
562
}
563
}
564
free(ge);
565
}
566
567
static bool
568
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
569
unsigned *full_sequence)
570
{
571
xcb_generic_event_t *ev;
572
xcb_present_generic_event_t *ge;
573
574
xcb_flush(draw->conn);
575
576
/* Only have one thread waiting for events at a time */
577
if (draw->has_event_waiter) {
578
cnd_wait(&draw->event_cnd, &draw->mtx);
579
if (full_sequence)
580
*full_sequence = draw->last_special_event_sequence;
581
/* Another thread has updated the protected info, so retest. */
582
return true;
583
} else {
584
draw->has_event_waiter = true;
585
/* Allow other threads access to the drawable while we're waiting. */
586
mtx_unlock(&draw->mtx);
587
ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
588
mtx_lock(&draw->mtx);
589
draw->has_event_waiter = false;
590
cnd_broadcast(&draw->event_cnd);
591
}
592
if (!ev)
593
return false;
594
draw->last_special_event_sequence = ev->full_sequence;
595
if (full_sequence)
596
*full_sequence = ev->full_sequence;
597
ge = (void *) ev;
598
dri3_handle_present_event(draw, ge);
599
return true;
600
}
601
602
/** loader_dri3_wait_for_msc
603
*
604
* Get the X server to send an event when the target msc/divisor/remainder is
605
* reached.
606
*/
607
bool
608
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
609
int64_t target_msc,
610
int64_t divisor, int64_t remainder,
611
int64_t *ust, int64_t *msc, int64_t *sbc)
612
{
613
xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
614
draw->drawable,
615
draw->eid,
616
target_msc,
617
divisor,
618
remainder);
619
unsigned full_sequence;
620
621
mtx_lock(&draw->mtx);
622
623
/* Wait for the event */
624
do {
625
if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
626
mtx_unlock(&draw->mtx);
627
return false;
628
}
629
} while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);
630
631
*ust = draw->notify_ust;
632
*msc = draw->notify_msc;
633
*sbc = draw->recv_sbc;
634
mtx_unlock(&draw->mtx);
635
636
return true;
637
}
638
639
/** loader_dri3_wait_for_sbc
640
*
641
* Wait for the completed swap buffer count to reach the specified
642
* target. Presumably the application knows that this will be reached with
643
* outstanding complete events, or we're going to be here awhile.
644
*/
645
int
646
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
647
int64_t target_sbc, int64_t *ust,
648
int64_t *msc, int64_t *sbc)
649
{
650
/* From the GLX_OML_sync_control spec:
651
*
652
* "If <target_sbc> = 0, the function will block until all previous
653
* swaps requested with glXSwapBuffersMscOML for that window have
654
* completed."
655
*/
656
mtx_lock(&draw->mtx);
657
if (!target_sbc)
658
target_sbc = draw->send_sbc;
659
660
while (draw->recv_sbc < target_sbc) {
661
if (!dri3_wait_for_event_locked(draw, NULL)) {
662
mtx_unlock(&draw->mtx);
663
return 0;
664
}
665
}
666
667
*ust = draw->ust;
668
*msc = draw->msc;
669
*sbc = draw->recv_sbc;
670
mtx_unlock(&draw->mtx);
671
return 1;
672
}
673
674
/** loader_dri3_find_back
675
*
676
* Find an idle back buffer. If there isn't one, then
677
* wait for a present idle notify event from the X server
678
*/
679
static int
680
dri3_find_back(struct loader_dri3_drawable *draw)
681
{
682
int b;
683
int num_to_consider;
684
int max_num;
685
686
mtx_lock(&draw->mtx);
687
/* Increase the likelyhood of reusing current buffer */
688
dri3_flush_present_events(draw);
689
690
/* Check whether we need to reuse the current back buffer as new back.
691
* In that case, wait until it's not busy anymore.
692
*/
693
if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
694
num_to_consider = 1;
695
max_num = 1;
696
draw->cur_blit_source = -1;
697
} else {
698
num_to_consider = draw->cur_num_back;
699
max_num = draw->max_num_back;
700
}
701
702
for (;;) {
703
for (b = 0; b < num_to_consider; b++) {
704
int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->cur_num_back);
705
struct loader_dri3_buffer *buffer = draw->buffers[id];
706
707
if (!buffer || !buffer->busy) {
708
draw->cur_back = id;
709
mtx_unlock(&draw->mtx);
710
return id;
711
}
712
}
713
714
if (num_to_consider < max_num) {
715
num_to_consider = ++draw->cur_num_back;
716
} else if (!dri3_wait_for_event_locked(draw, NULL)) {
717
mtx_unlock(&draw->mtx);
718
return -1;
719
}
720
}
721
}
722
723
static xcb_gcontext_t
724
dri3_drawable_gc(struct loader_dri3_drawable *draw)
725
{
726
if (!draw->gc) {
727
uint32_t v = 0;
728
xcb_create_gc(draw->conn,
729
(draw->gc = xcb_generate_id(draw->conn)),
730
draw->drawable,
731
XCB_GC_GRAPHICS_EXPOSURES,
732
&v);
733
}
734
return draw->gc;
735
}
736
737
738
static struct loader_dri3_buffer *
739
dri3_back_buffer(struct loader_dri3_drawable *draw)
740
{
741
return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
742
}
743
744
static struct loader_dri3_buffer *
745
dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
746
{
747
return draw->buffers[LOADER_DRI3_FRONT_ID];
748
}
749
750
static void
751
dri3_copy_area(xcb_connection_t *c,
752
xcb_drawable_t src_drawable,
753
xcb_drawable_t dst_drawable,
754
xcb_gcontext_t gc,
755
int16_t src_x,
756
int16_t src_y,
757
int16_t dst_x,
758
int16_t dst_y,
759
uint16_t width,
760
uint16_t height)
761
{
762
xcb_void_cookie_t cookie;
763
764
cookie = xcb_copy_area_checked(c,
765
src_drawable,
766
dst_drawable,
767
gc,
768
src_x,
769
src_y,
770
dst_x,
771
dst_y,
772
width,
773
height);
774
xcb_discard_reply(c, cookie.sequence);
775
}
776
777
/**
778
* Asks the driver to flush any queued work necessary for serializing with the
779
* X command stream, and optionally the slightly more strict requirement of
780
* glFlush() equivalence (which would require flushing even if nothing had
781
* been drawn to a window system framebuffer, for example).
782
*/
783
void
784
loader_dri3_flush(struct loader_dri3_drawable *draw,
785
unsigned flags,
786
enum __DRI2throttleReason throttle_reason)
787
{
788
/* NEED TO CHECK WHETHER CONTEXT IS NULL */
789
__DRIcontext *dri_context = draw->vtable->get_dri_context(draw);
790
791
if (dri_context) {
792
draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
793
flags, throttle_reason);
794
}
795
}
796
797
void
798
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
799
int x, int y,
800
int width, int height,
801
bool flush)
802
{
803
struct loader_dri3_buffer *back;
804
unsigned flags = __DRI2_FLUSH_DRAWABLE;
805
806
/* Check we have the right attachments */
807
if (!draw->have_back || draw->is_pixmap)
808
return;
809
810
if (flush)
811
flags |= __DRI2_FLUSH_CONTEXT;
812
loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);
813
814
back = dri3_find_back_alloc(draw);
815
if (!back)
816
return;
817
818
y = draw->height - y - height;
819
820
if (draw->is_different_gpu) {
821
/* Update the linear buffer part of the back buffer
822
* for the dri3_copy_area operation
823
*/
824
(void) loader_dri3_blit_image(draw,
825
back->linear_buffer,
826
back->image,
827
0, 0, back->width, back->height,
828
0, 0, __BLIT_FLAG_FLUSH);
829
}
830
831
loader_dri3_swapbuffer_barrier(draw);
832
dri3_fence_reset(draw->conn, back);
833
dri3_copy_area(draw->conn,
834
back->pixmap,
835
draw->drawable,
836
dri3_drawable_gc(draw),
837
x, y, x, y, width, height);
838
dri3_fence_trigger(draw->conn, back);
839
/* Refresh the fake front (if present) after we just damaged the real
840
* front.
841
*/
842
if (draw->have_fake_front &&
843
!loader_dri3_blit_image(draw,
844
dri3_fake_front_buffer(draw)->image,
845
back->image,
846
x, y, width, height,
847
x, y, __BLIT_FLAG_FLUSH) &&
848
!draw->is_different_gpu) {
849
dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
850
dri3_copy_area(draw->conn,
851
back->pixmap,
852
dri3_fake_front_buffer(draw)->pixmap,
853
dri3_drawable_gc(draw),
854
x, y, x, y, width, height);
855
dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
856
dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw));
857
}
858
dri3_fence_await(draw->conn, draw, back);
859
}
860
861
void
862
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
863
xcb_drawable_t dest,
864
xcb_drawable_t src)
865
{
866
loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);
867
868
dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
869
dri3_copy_area(draw->conn,
870
src, dest,
871
dri3_drawable_gc(draw),
872
0, 0, 0, 0, draw->width, draw->height);
873
dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
874
dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw));
875
}
876
877
void
878
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
879
{
880
struct loader_dri3_buffer *front;
881
882
if (draw == NULL || !draw->have_fake_front)
883
return;
884
885
front = dri3_fake_front_buffer(draw);
886
887
loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
888
889
/* In the psc->is_different_gpu case, the linear buffer has been updated,
890
* but not yet the tiled buffer.
891
* Copy back to the tiled buffer we use for rendering.
892
* Note that we don't need flushing.
893
*/
894
if (draw->is_different_gpu)
895
(void) loader_dri3_blit_image(draw,
896
front->image,
897
front->linear_buffer,
898
0, 0, front->width, front->height,
899
0, 0, 0);
900
}
901
902
void
903
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
904
{
905
struct loader_dri3_buffer *front;
906
907
if (draw == NULL || !draw->have_fake_front)
908
return;
909
910
front = dri3_fake_front_buffer(draw);
911
912
/* In the psc->is_different_gpu case, we update the linear_buffer
913
* before updating the real front.
914
*/
915
if (draw->is_different_gpu)
916
(void) loader_dri3_blit_image(draw,
917
front->linear_buffer,
918
front->image,
919
0, 0, front->width, front->height,
920
0, 0, __BLIT_FLAG_FLUSH);
921
loader_dri3_swapbuffer_barrier(draw);
922
loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
923
}
924
925
/** dri3_flush_present_events
926
*
927
* Process any present events that have been received from the X server
928
*/
929
static void
930
dri3_flush_present_events(struct loader_dri3_drawable *draw)
931
{
932
/* Check to see if any configuration changes have occurred
933
* since we were last invoked
934
*/
935
if (draw->has_event_waiter)
936
return;
937
938
if (draw->special_event) {
939
xcb_generic_event_t *ev;
940
941
while ((ev = xcb_poll_for_special_event(draw->conn,
942
draw->special_event)) != NULL) {
943
xcb_present_generic_event_t *ge = (void *) ev;
944
dri3_handle_present_event(draw, ge);
945
}
946
}
947
}
948
949
/** loader_dri3_swap_buffers_msc
950
*
951
* Make the current back buffer visible using the present extension
952
*/
953
int64_t
954
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
955
int64_t target_msc, int64_t divisor,
956
int64_t remainder, unsigned flush_flags,
957
const int *rects, int n_rects,
958
bool force_copy)
959
{
960
struct loader_dri3_buffer *back;
961
int64_t ret = 0;
962
uint32_t options = XCB_PRESENT_OPTION_NONE;
963
964
draw->vtable->flush_drawable(draw, flush_flags);
965
966
back = dri3_find_back_alloc(draw);
967
968
mtx_lock(&draw->mtx);
969
970
if (draw->adaptive_sync && !draw->adaptive_sync_active) {
971
set_adaptive_sync_property(draw->conn, draw->drawable, true);
972
draw->adaptive_sync_active = true;
973
}
974
975
if (draw->is_different_gpu && back) {
976
/* Update the linear buffer before presenting the pixmap */
977
(void) loader_dri3_blit_image(draw,
978
back->linear_buffer,
979
back->image,
980
0, 0, back->width, back->height,
981
0, 0, __BLIT_FLAG_FLUSH);
982
}
983
984
/* If we need to preload the new back buffer, remember the source.
985
* The force_copy parameter is used by EGL to attempt to preserve
986
* the back buffer across a call to this function.
987
*/
988
if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy)
989
draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);
990
991
/* Exchange the back and fake front. Even though the server knows about these
992
* buffers, it has no notion of back and fake front.
993
*/
994
if (back && draw->have_fake_front) {
995
struct loader_dri3_buffer *tmp;
996
997
tmp = dri3_fake_front_buffer(draw);
998
draw->buffers[LOADER_DRI3_FRONT_ID] = back;
999
draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;
1000
1001
if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy)
1002
draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
1003
}
1004
1005
dri3_flush_present_events(draw);
1006
1007
if (back && !draw->is_pixmap) {
1008
dri3_fence_reset(draw->conn, back);
1009
1010
/* Compute when we want the frame shown by taking the last known
1011
* successful MSC and adding in a swap interval for each outstanding swap
1012
* request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
1013
* semantic"
1014
*/
1015
++draw->send_sbc;
1016
if (target_msc == 0 && divisor == 0 && remainder == 0)
1017
target_msc = draw->msc + abs(draw->swap_interval) *
1018
(draw->send_sbc - draw->recv_sbc);
1019
else if (divisor == 0 && remainder > 0) {
1020
/* From the GLX_OML_sync_control spec:
1021
* "If <divisor> = 0, the swap will occur when MSC becomes
1022
* greater than or equal to <target_msc>."
1023
*
1024
* Note that there's no mention of the remainder. The Present
1025
* extension throws BadValue for remainder != 0 with divisor == 0, so
1026
* just drop the passed in value.
1027
*/
1028
remainder = 0;
1029
}
1030
1031
/* From the GLX_EXT_swap_control spec
1032
* and the EGL 1.4 spec (page 53):
1033
*
1034
* "If <interval> is set to a value of 0, buffer swaps are not
1035
* synchronized to a video frame."
1036
*
1037
* From GLX_EXT_swap_control_tear:
1038
*
1039
* "If <interval> is negative, the minimum number of video frames
1040
* between buffer swaps is the absolute value of <interval>. In this
1041
* case, if abs(<interval>) video frames have already passed from
1042
* the previous swap when the swap is ready to be performed, the
1043
* swap will occur without synchronization to a video frame."
1044
*
1045
* Implementation note: It is possible to enable triple buffering
1046
* behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
1047
* the default.
1048
*/
1049
if (draw->swap_interval <= 0)
1050
options |= XCB_PRESENT_OPTION_ASYNC;
1051
1052
/* If we need to populate the new back, but need to reuse the back
1053
* buffer slot due to lack of local blit capabilities, make sure
1054
* the server doesn't flip and we deadlock.
1055
*/
1056
if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
1057
options |= XCB_PRESENT_OPTION_COPY;
1058
#ifdef HAVE_DRI3_MODIFIERS
1059
if (draw->multiplanes_available)
1060
options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1061
#endif
1062
back->busy = 1;
1063
back->last_swap = draw->send_sbc;
1064
1065
if (!draw->region) {
1066
draw->region = xcb_generate_id(draw->conn);
1067
xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
1068
}
1069
1070
xcb_xfixes_region_t region = 0;
1071
xcb_rectangle_t xcb_rects[64];
1072
1073
if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
1074
for (int i = 0; i < n_rects; i++) {
1075
const int *rect = &rects[i * 4];
1076
xcb_rects[i].x = rect[0];
1077
xcb_rects[i].y = draw->height - rect[1] - rect[3];
1078
xcb_rects[i].width = rect[2];
1079
xcb_rects[i].height = rect[3];
1080
}
1081
1082
region = draw->region;
1083
xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
1084
}
1085
1086
xcb_present_pixmap(draw->conn,
1087
draw->drawable,
1088
back->pixmap,
1089
(uint32_t) draw->send_sbc,
1090
0, /* valid */
1091
region, /* update */
1092
0, /* x_off */
1093
0, /* y_off */
1094
None, /* target_crtc */
1095
None,
1096
back->sync_fence,
1097
options,
1098
target_msc,
1099
divisor,
1100
remainder, 0, NULL);
1101
ret = (int64_t) draw->send_sbc;
1102
1103
/* Schedule a server-side back-preserving blit if necessary.
1104
* This happens iff all conditions below are satisfied:
1105
* a) We have a fake front,
1106
* b) We need to preserve the back buffer,
1107
* c) We don't have local blit capabilities.
1108
*/
1109
if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
1110
draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
1111
struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
1112
struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];
1113
1114
dri3_fence_reset(draw->conn, new_back);
1115
dri3_copy_area(draw->conn, src->pixmap,
1116
new_back->pixmap,
1117
dri3_drawable_gc(draw),
1118
0, 0, 0, 0, draw->width, draw->height);
1119
dri3_fence_trigger(draw->conn, new_back);
1120
new_back->last_swap = src->last_swap;
1121
}
1122
1123
xcb_flush(draw->conn);
1124
if (draw->stamp)
1125
++(*draw->stamp);
1126
}
1127
mtx_unlock(&draw->mtx);
1128
1129
draw->ext->flush->invalidate(draw->dri_drawable);
1130
1131
return ret;
1132
}
1133
1134
int
1135
loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1136
{
1137
struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1138
int ret;
1139
1140
mtx_lock(&draw->mtx);
1141
ret = (!back || back->last_swap == 0) ? 0 :
1142
draw->send_sbc - back->last_swap + 1;
1143
mtx_unlock(&draw->mtx);
1144
1145
return ret;
1146
}
1147
1148
/** loader_dri3_open
1149
*
1150
* Wrapper around xcb_dri3_open
1151
*/
1152
int
1153
loader_dri3_open(xcb_connection_t *conn,
1154
xcb_window_t root,
1155
uint32_t provider)
1156
{
1157
xcb_dri3_open_cookie_t cookie;
1158
xcb_dri3_open_reply_t *reply;
1159
xcb_xfixes_query_version_cookie_t fixes_cookie;
1160
xcb_xfixes_query_version_reply_t *fixes_reply;
1161
int fd;
1162
1163
cookie = xcb_dri3_open(conn,
1164
root,
1165
provider);
1166
1167
reply = xcb_dri3_open_reply(conn, cookie, NULL);
1168
if (!reply)
1169
return -1;
1170
1171
if (reply->nfd != 1) {
1172
free(reply);
1173
return -1;
1174
}
1175
1176
fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1177
free(reply);
1178
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1179
1180
/* let the server know our xfixes level */
1181
fixes_cookie = xcb_xfixes_query_version(conn,
1182
XCB_XFIXES_MAJOR_VERSION,
1183
XCB_XFIXES_MINOR_VERSION);
1184
fixes_reply = xcb_xfixes_query_version_reply(conn, fixes_cookie, NULL);
1185
free(fixes_reply);
1186
1187
return fd;
1188
}
1189
1190
static uint32_t
1191
dri3_cpp_for_format(uint32_t format) {
1192
switch (format) {
1193
case __DRI_IMAGE_FORMAT_R8:
1194
return 1;
1195
case __DRI_IMAGE_FORMAT_RGB565:
1196
case __DRI_IMAGE_FORMAT_GR88:
1197
return 2;
1198
case __DRI_IMAGE_FORMAT_XRGB8888:
1199
case __DRI_IMAGE_FORMAT_ARGB8888:
1200
case __DRI_IMAGE_FORMAT_ABGR8888:
1201
case __DRI_IMAGE_FORMAT_XBGR8888:
1202
case __DRI_IMAGE_FORMAT_XRGB2101010:
1203
case __DRI_IMAGE_FORMAT_ARGB2101010:
1204
case __DRI_IMAGE_FORMAT_XBGR2101010:
1205
case __DRI_IMAGE_FORMAT_ABGR2101010:
1206
case __DRI_IMAGE_FORMAT_SARGB8:
1207
case __DRI_IMAGE_FORMAT_SABGR8:
1208
case __DRI_IMAGE_FORMAT_SXRGB8:
1209
return 4;
1210
case __DRI_IMAGE_FORMAT_XBGR16161616F:
1211
case __DRI_IMAGE_FORMAT_ABGR16161616F:
1212
return 8;
1213
case __DRI_IMAGE_FORMAT_NONE:
1214
default:
1215
return 0;
1216
}
1217
}
1218
1219
/* Map format of render buffer to corresponding format for the linear_buffer
1220
* used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1221
* Usually linear_format == format, except for depth >= 30 formats, where
1222
* different gpu vendors have different preferences wrt. color channel ordering.
1223
*/
1224
static uint32_t
1225
dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1226
{
1227
switch (format) {
1228
case __DRI_IMAGE_FORMAT_XRGB2101010:
1229
case __DRI_IMAGE_FORMAT_XBGR2101010:
1230
/* Different preferred formats for different hw */
1231
if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1232
return __DRI_IMAGE_FORMAT_XBGR2101010;
1233
else
1234
return __DRI_IMAGE_FORMAT_XRGB2101010;
1235
1236
case __DRI_IMAGE_FORMAT_ARGB2101010:
1237
case __DRI_IMAGE_FORMAT_ABGR2101010:
1238
/* Different preferred formats for different hw */
1239
if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1240
return __DRI_IMAGE_FORMAT_ABGR2101010;
1241
else
1242
return __DRI_IMAGE_FORMAT_ARGB2101010;
1243
1244
default:
1245
return format;
1246
}
1247
}
1248
1249
/* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1250
* the createImageFromFds call takes DRM_FORMAT codes. To avoid
1251
* complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1252
* translate to DRM_FORMAT codes in the call to createImageFromFds
1253
*/
1254
static int
1255
image_format_to_fourcc(int format)
1256
{
1257
1258
/* Convert from __DRI_IMAGE_FORMAT to DRM_FORMAT (sigh) */
1259
switch (format) {
1260
case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1261
case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
1262
case __DRI_IMAGE_FORMAT_SXRGB8: return __DRI_IMAGE_FOURCC_SXRGB8888;
1263
case __DRI_IMAGE_FORMAT_RGB565: return DRM_FORMAT_RGB565;
1264
case __DRI_IMAGE_FORMAT_XRGB8888: return DRM_FORMAT_XRGB8888;
1265
case __DRI_IMAGE_FORMAT_ARGB8888: return DRM_FORMAT_ARGB8888;
1266
case __DRI_IMAGE_FORMAT_ABGR8888: return DRM_FORMAT_ABGR8888;
1267
case __DRI_IMAGE_FORMAT_XBGR8888: return DRM_FORMAT_XBGR8888;
1268
case __DRI_IMAGE_FORMAT_XRGB2101010: return DRM_FORMAT_XRGB2101010;
1269
case __DRI_IMAGE_FORMAT_ARGB2101010: return DRM_FORMAT_ARGB2101010;
1270
case __DRI_IMAGE_FORMAT_XBGR2101010: return DRM_FORMAT_XBGR2101010;
1271
case __DRI_IMAGE_FORMAT_ABGR2101010: return DRM_FORMAT_ABGR2101010;
1272
case __DRI_IMAGE_FORMAT_XBGR16161616F: return DRM_FORMAT_XBGR16161616F;
1273
case __DRI_IMAGE_FORMAT_ABGR16161616F: return DRM_FORMAT_ABGR16161616F;
1274
}
1275
return 0;
1276
}
1277
1278
#ifdef HAVE_DRI3_MODIFIERS
/* Return true iff the driver advertises, for @format (a DRM fourcc code),
 * at least one of the @count modifiers in @modifiers.
 *
 * Fixes vs. the previous version:
 *  - guard against a negative driver-reported count (was only == 0),
 *    which would otherwise reach malloc() as a huge size_t;
 *  - check the second queryDmaBufModifiers call so we never scan an
 *    unfilled array;
 *  - use an unsigned index against the unsigned @count (no signed/
 *    unsigned comparison).
 */
static bool
has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
                       uint64_t *modifiers, uint32_t count)
{
   uint64_t *supported_modifiers;
   int32_t supported_modifiers_count;
   bool found = false;

   /* First query only the number of supported modifiers. */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
                                               format, 0, NULL, NULL,
                                               &supported_modifiers_count) ||
       supported_modifiers_count <= 0)
      return false;

   supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
   if (!supported_modifiers)
      return false;

   /* Second query fills the array; bail if the driver refuses. */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
                                               supported_modifiers_count,
                                               supported_modifiers, NULL,
                                               &supported_modifiers_count)) {
      free(supported_modifiers);
      return false;
   }

   for (int32_t i = 0; !found && i < supported_modifiers_count; i++) {
      for (uint32_t j = 0; !found && j < count; j++) {
         if (supported_modifiers[i] == modifiers[j])
            found = true;
      }
   }

   free(supported_modifiers);
   return found;
}
#endif
/** loader_dri3_alloc_render_buffer
1316
*
1317
* Use the driver createImage function to construct a __DRIimage, then
1318
* get a file descriptor for that and create an X pixmap from that
1319
*
1320
* Allocate an xshmfence for synchronization
1321
*/
1322
static struct loader_dri3_buffer *
1323
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
1324
int width, int height, int depth)
1325
{
1326
struct loader_dri3_buffer *buffer;
1327
__DRIimage *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
1328
xcb_pixmap_t pixmap;
1329
xcb_sync_fence_t sync_fence;
1330
struct xshmfence *shm_fence;
1331
int buffer_fds[4], fence_fd;
1332
int num_planes = 0;
1333
uint64_t *modifiers = NULL;
1334
uint32_t count = 0;
1335
int i, mod;
1336
int ret;
1337
1338
/* Create an xshmfence object and
1339
* prepare to send that to the X server
1340
*/
1341
1342
fence_fd = xshmfence_alloc_shm();
1343
if (fence_fd < 0)
1344
return NULL;
1345
1346
shm_fence = xshmfence_map_shm(fence_fd);
1347
if (shm_fence == NULL)
1348
goto no_shm_fence;
1349
1350
/* Allocate the image from the driver
1351
*/
1352
buffer = calloc(1, sizeof *buffer);
1353
if (!buffer)
1354
goto no_buffer;
1355
1356
buffer->cpp = dri3_cpp_for_format(format);
1357
if (!buffer->cpp)
1358
goto no_image;
1359
1360
if (!draw->is_different_gpu) {
1361
#ifdef HAVE_DRI3_MODIFIERS
1362
if (draw->multiplanes_available &&
1363
draw->ext->image->base.version >= 15 &&
1364
draw->ext->image->queryDmaBufModifiers &&
1365
draw->ext->image->createImageWithModifiers) {
1366
xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
1367
xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
1368
xcb_generic_error_t *error = NULL;
1369
1370
mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
1371
draw->window,
1372
depth, buffer->cpp * 8);
1373
mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
1374
mod_cookie,
1375
&error);
1376
if (!mod_reply)
1377
goto no_image;
1378
1379
if (mod_reply->num_window_modifiers) {
1380
count = mod_reply->num_window_modifiers;
1381
modifiers = malloc(count * sizeof(uint64_t));
1382
if (!modifiers) {
1383
free(mod_reply);
1384
goto no_image;
1385
}
1386
1387
memcpy(modifiers,
1388
xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1389
count * sizeof(uint64_t));
1390
1391
if (!has_supported_modifier(draw, image_format_to_fourcc(format),
1392
modifiers, count)) {
1393
free(modifiers);
1394
count = 0;
1395
modifiers = NULL;
1396
}
1397
}
1398
1399
if (mod_reply->num_screen_modifiers && modifiers == NULL) {
1400
count = mod_reply->num_screen_modifiers;
1401
modifiers = malloc(count * sizeof(uint64_t));
1402
if (!modifiers) {
1403
free(modifiers);
1404
free(mod_reply);
1405
goto no_image;
1406
}
1407
1408
memcpy(modifiers,
1409
xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1410
count * sizeof(uint64_t));
1411
}
1412
1413
free(mod_reply);
1414
}
1415
#endif
1416
buffer->image = loader_dri_create_image(draw->dri_screen, draw->ext->image,
1417
width, height, format,
1418
__DRI_IMAGE_USE_SHARE |
1419
__DRI_IMAGE_USE_SCANOUT |
1420
__DRI_IMAGE_USE_BACKBUFFER |
1421
(draw->is_protected_content ?
1422
__DRI_IMAGE_USE_PROTECTED : 0),
1423
modifiers, count, buffer);
1424
free(modifiers);
1425
1426
pixmap_buffer = buffer->image;
1427
1428
if (!buffer->image)
1429
goto no_image;
1430
} else {
1431
buffer->image = draw->ext->image->createImage(draw->dri_screen,
1432
width, height,
1433
format,
1434
0,
1435
buffer);
1436
1437
if (!buffer->image)
1438
goto no_image;
1439
1440
/* if driver name is same only then dri_screen_display_gpu is set.
1441
* This check is needed because for simplicity render gpu image extension
1442
* is also used for display gpu.
1443
*/
1444
if (draw->dri_screen_display_gpu) {
1445
linear_buffer_display_gpu =
1446
draw->ext->image->createImage(draw->dri_screen_display_gpu,
1447
width, height,
1448
dri3_linear_format_for_format(draw, format),
1449
__DRI_IMAGE_USE_SHARE |
1450
__DRI_IMAGE_USE_LINEAR |
1451
__DRI_IMAGE_USE_BACKBUFFER |
1452
__DRI_IMAGE_USE_SCANOUT,
1453
buffer);
1454
pixmap_buffer = linear_buffer_display_gpu;
1455
}
1456
1457
if (!pixmap_buffer) {
1458
buffer->linear_buffer =
1459
draw->ext->image->createImage(draw->dri_screen,
1460
width, height,
1461
dri3_linear_format_for_format(draw, format),
1462
__DRI_IMAGE_USE_SHARE |
1463
__DRI_IMAGE_USE_LINEAR |
1464
__DRI_IMAGE_USE_BACKBUFFER |
1465
__DRI_IMAGE_USE_SCANOUT,
1466
buffer);
1467
1468
pixmap_buffer = buffer->linear_buffer;
1469
if (!buffer->linear_buffer) {
1470
goto no_linear_buffer;
1471
}
1472
}
1473
}
1474
1475
/* X want some information about the planes, so ask the image for it
1476
*/
1477
if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1478
&num_planes))
1479
num_planes = 1;
1480
1481
for (i = 0; i < num_planes; i++) {
1482
__DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);
1483
1484
if (!image) {
1485
assert(i == 0);
1486
image = pixmap_buffer;
1487
}
1488
1489
buffer_fds[i] = -1;
1490
1491
ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
1492
&buffer_fds[i]);
1493
ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
1494
&buffer->strides[i]);
1495
ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
1496
&buffer->offsets[i]);
1497
if (image != pixmap_buffer)
1498
draw->ext->image->destroyImage(image);
1499
1500
if (!ret)
1501
goto no_buffer_attrib;
1502
}
1503
1504
ret = draw->ext->image->queryImage(pixmap_buffer,
1505
__DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
1506
buffer->modifier = (uint64_t) mod << 32;
1507
ret &= draw->ext->image->queryImage(pixmap_buffer,
1508
__DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
1509
buffer->modifier |= (uint64_t)(mod & 0xffffffff);
1510
1511
if (!ret)
1512
buffer->modifier = DRM_FORMAT_MOD_INVALID;
1513
1514
if (draw->is_different_gpu && draw->dri_screen_display_gpu &&
1515
linear_buffer_display_gpu) {
1516
/* The linear buffer was created in the display GPU's vram, so we
1517
* need to make it visible to render GPU
1518
*/
1519
buffer->linear_buffer =
1520
draw->ext->image->createImageFromFds(draw->dri_screen,
1521
width,
1522
height,
1523
image_format_to_fourcc(format),
1524
&buffer_fds[0], num_planes,
1525
&buffer->strides[0],
1526
&buffer->offsets[0],
1527
buffer);
1528
if (!buffer->linear_buffer)
1529
goto no_buffer_attrib;
1530
1531
draw->ext->image->destroyImage(linear_buffer_display_gpu);
1532
}
1533
1534
pixmap = xcb_generate_id(draw->conn);
1535
#ifdef HAVE_DRI3_MODIFIERS
1536
if (draw->multiplanes_available &&
1537
buffer->modifier != DRM_FORMAT_MOD_INVALID) {
1538
xcb_dri3_pixmap_from_buffers(draw->conn,
1539
pixmap,
1540
draw->window,
1541
num_planes,
1542
width, height,
1543
buffer->strides[0], buffer->offsets[0],
1544
buffer->strides[1], buffer->offsets[1],
1545
buffer->strides[2], buffer->offsets[2],
1546
buffer->strides[3], buffer->offsets[3],
1547
depth, buffer->cpp * 8,
1548
buffer->modifier,
1549
buffer_fds);
1550
} else
1551
#endif
1552
{
1553
xcb_dri3_pixmap_from_buffer(draw->conn,
1554
pixmap,
1555
draw->drawable,
1556
buffer->size,
1557
width, height, buffer->strides[0],
1558
depth, buffer->cpp * 8,
1559
buffer_fds[0]);
1560
}
1561
1562
xcb_dri3_fence_from_fd(draw->conn,
1563
pixmap,
1564
(sync_fence = xcb_generate_id(draw->conn)),
1565
false,
1566
fence_fd);
1567
1568
buffer->pixmap = pixmap;
1569
buffer->own_pixmap = true;
1570
buffer->sync_fence = sync_fence;
1571
buffer->shm_fence = shm_fence;
1572
buffer->width = width;
1573
buffer->height = height;
1574
1575
/* Mark the buffer as idle
1576
*/
1577
dri3_fence_set(buffer);
1578
1579
return buffer;
1580
1581
no_buffer_attrib:
1582
do {
1583
if (buffer_fds[i] != -1)
1584
close(buffer_fds[i]);
1585
} while (--i >= 0);
1586
draw->ext->image->destroyImage(pixmap_buffer);
1587
no_linear_buffer:
1588
if (draw->is_different_gpu)
1589
draw->ext->image->destroyImage(buffer->image);
1590
no_image:
1591
free(buffer);
1592
no_buffer:
1593
xshmfence_unmap_shm(shm_fence);
1594
no_shm_fence:
1595
close(fence_fd);
1596
return NULL;
1597
}
1598
1599
/** loader_dri3_update_drawable
1600
*
1601
* Called the first time we use the drawable and then
1602
* after we receive present configure notify events to
1603
* track the geometry of the drawable
1604
*/
1605
static int
1606
dri3_update_drawable(struct loader_dri3_drawable *draw)
1607
{
1608
mtx_lock(&draw->mtx);
1609
if (draw->first_init) {
1610
xcb_get_geometry_cookie_t geom_cookie;
1611
xcb_get_geometry_reply_t *geom_reply;
1612
xcb_void_cookie_t cookie;
1613
xcb_generic_error_t *error;
1614
xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
1615
xcb_present_query_capabilities_reply_t *present_capabilities_reply;
1616
xcb_window_t root_win;
1617
1618
draw->first_init = false;
1619
1620
/* Try to select for input on the window.
1621
*
1622
* If the drawable is a window, this will get our events
1623
* delivered.
1624
*
1625
* Otherwise, we'll get a BadWindow error back from this request which
1626
* will let us know that the drawable is a pixmap instead.
1627
*/
1628
1629
draw->eid = xcb_generate_id(draw->conn);
1630
cookie =
1631
xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
1632
XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1633
XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1634
XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1635
1636
present_capabilities_cookie =
1637
xcb_present_query_capabilities(draw->conn, draw->drawable);
1638
1639
/* Create an XCB event queue to hold present events outside of the usual
1640
* application event queue
1641
*/
1642
draw->special_event = xcb_register_for_special_xge(draw->conn,
1643
&xcb_present_id,
1644
draw->eid,
1645
draw->stamp);
1646
geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1647
1648
geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1649
1650
if (!geom_reply) {
1651
mtx_unlock(&draw->mtx);
1652
return false;
1653
}
1654
draw->width = geom_reply->width;
1655
draw->height = geom_reply->height;
1656
draw->depth = geom_reply->depth;
1657
draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1658
root_win = geom_reply->root;
1659
1660
free(geom_reply);
1661
1662
draw->is_pixmap = false;
1663
1664
/* Check to see if our select input call failed. If it failed with a
1665
* BadWindow error, then assume the drawable is a pixmap. Destroy the
1666
* special event queue created above and mark the drawable as a pixmap
1667
*/
1668
1669
error = xcb_request_check(draw->conn, cookie);
1670
1671
present_capabilities_reply =
1672
xcb_present_query_capabilities_reply(draw->conn,
1673
present_capabilities_cookie,
1674
NULL);
1675
1676
if (present_capabilities_reply) {
1677
draw->present_capabilities = present_capabilities_reply->capabilities;
1678
free(present_capabilities_reply);
1679
} else
1680
draw->present_capabilities = 0;
1681
1682
if (error) {
1683
if (error->error_code != BadWindow) {
1684
free(error);
1685
mtx_unlock(&draw->mtx);
1686
return false;
1687
}
1688
free(error);
1689
draw->is_pixmap = true;
1690
xcb_unregister_for_special_event(draw->conn, draw->special_event);
1691
draw->special_event = NULL;
1692
}
1693
1694
if (draw->is_pixmap)
1695
draw->window = root_win;
1696
else
1697
draw->window = draw->drawable;
1698
}
1699
dri3_flush_present_events(draw);
1700
mtx_unlock(&draw->mtx);
1701
return true;
1702
}
1703
1704
__DRIimage *
1705
loader_dri3_create_image(xcb_connection_t *c,
1706
xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1707
unsigned int format,
1708
__DRIscreen *dri_screen,
1709
const __DRIimageExtension *image,
1710
void *loaderPrivate)
1711
{
1712
int *fds;
1713
__DRIimage *image_planar, *ret;
1714
int stride, offset;
1715
1716
/* Get an FD for the pixmap object
1717
*/
1718
fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1719
1720
stride = bp_reply->stride;
1721
offset = 0;
1722
1723
/* createImageFromFds creates a wrapper __DRIimage structure which
1724
* can deal with multiple planes for things like Yuv images. So, once
1725
* we've gotten the planar wrapper, pull the single plane out of it and
1726
* discard the wrapper.
1727
*/
1728
image_planar = image->createImageFromFds(dri_screen,
1729
bp_reply->width,
1730
bp_reply->height,
1731
image_format_to_fourcc(format),
1732
fds, 1,
1733
&stride, &offset, loaderPrivate);
1734
close(fds[0]);
1735
if (!image_planar)
1736
return NULL;
1737
1738
ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1739
1740
if (!ret)
1741
ret = image_planar;
1742
else
1743
image->destroyImage(image_planar);
1744
1745
return ret;
1746
}
1747
1748
#ifdef HAVE_DRI3_MODIFIERS
/* Wrap the (possibly multi-plane) fds from a DRI3 buffers_from_pixmap
 * reply in a __DRIimage via createImageFromDmaBufs2.  Returns NULL on
 * failure; all reply fds are closed before returning.
 */
__DRIimage *
loader_dri3_create_image_from_buffers(xcb_connection_t *c,
                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
                                      unsigned int format,
                                      __DRIscreen *dri_screen,
                                      const __DRIimageExtension *image,
                                      void *loaderPrivate)
{
   __DRIimage *ret;
   int *fds;
   uint32_t *strides_in, *offsets_in;
   int strides[4], offsets[4];
   unsigned error;
   int i;

   /* We can only describe up to four planes. */
   if (bp_reply->nfd > 4)
      return NULL;

   fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
   strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
   offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);

   /* The DRI interface wants int arrays; the protocol hands us uint32_t. */
   for (i = 0; i < bp_reply->nfd; i++) {
      strides[i] = strides_in[i];
      offsets[i] = offsets_in[i];
   }

   ret = image->createImageFromDmaBufs2(dri_screen,
                                        bp_reply->width,
                                        bp_reply->height,
                                        image_format_to_fourcc(format),
                                        bp_reply->modifier,
                                        fds, bp_reply->nfd,
                                        strides, offsets,
                                        0, 0, 0, 0, /* UNDEFINED */
                                        &error, loaderPrivate);

   /* Our copies of the fds are no longer needed, success or not. */
   for (i = 0; i < bp_reply->nfd; i++)
      close(fds[i]);

   return ret;
}
#endif
/** dri3_get_pixmap_buffer
1793
*
1794
* Get the DRM object for a pixmap from the X server and
1795
* wrap that with a __DRIimage structure using createImageFromFds
1796
*/
1797
static struct loader_dri3_buffer *
1798
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
1799
enum loader_dri3_buffer_type buffer_type,
1800
struct loader_dri3_drawable *draw)
1801
{
1802
int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
1803
struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
1804
xcb_drawable_t pixmap;
1805
xcb_sync_fence_t sync_fence;
1806
struct xshmfence *shm_fence;
1807
int width;
1808
int height;
1809
int fence_fd;
1810
__DRIscreen *cur_screen;
1811
1812
if (buffer)
1813
return buffer;
1814
1815
pixmap = draw->drawable;
1816
1817
buffer = calloc(1, sizeof *buffer);
1818
if (!buffer)
1819
goto no_buffer;
1820
1821
fence_fd = xshmfence_alloc_shm();
1822
if (fence_fd < 0)
1823
goto no_fence;
1824
shm_fence = xshmfence_map_shm(fence_fd);
1825
if (shm_fence == NULL) {
1826
close (fence_fd);
1827
goto no_fence;
1828
}
1829
1830
/* Get the currently-bound screen or revert to using the drawable's screen if
1831
* no contexts are currently bound. The latter case is at least necessary for
1832
* obs-studio, when using Window Capture (Xcomposite) as a Source.
1833
*/
1834
cur_screen = draw->vtable->get_dri_screen();
1835
if (!cur_screen) {
1836
cur_screen = draw->dri_screen;
1837
}
1838
1839
xcb_dri3_fence_from_fd(draw->conn,
1840
pixmap,
1841
(sync_fence = xcb_generate_id(draw->conn)),
1842
false,
1843
fence_fd);
1844
#ifdef HAVE_DRI3_MODIFIERS
1845
if (draw->multiplanes_available &&
1846
draw->ext->image->base.version >= 15 &&
1847
draw->ext->image->createImageFromDmaBufs2) {
1848
xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
1849
xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;
1850
1851
bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
1852
bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
1853
NULL);
1854
if (!bps_reply)
1855
goto no_image;
1856
buffer->image =
1857
loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
1858
cur_screen, draw->ext->image,
1859
buffer);
1860
width = bps_reply->width;
1861
height = bps_reply->height;
1862
free(bps_reply);
1863
} else
1864
#endif
1865
{
1866
xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
1867
xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
1868
1869
bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
1870
bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
1871
if (!bp_reply)
1872
goto no_image;
1873
1874
buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
1875
cur_screen, draw->ext->image,
1876
buffer);
1877
width = bp_reply->width;
1878
height = bp_reply->height;
1879
free(bp_reply);
1880
}
1881
1882
if (!buffer->image)
1883
goto no_image;
1884
1885
buffer->pixmap = pixmap;
1886
buffer->own_pixmap = false;
1887
buffer->width = width;
1888
buffer->height = height;
1889
buffer->shm_fence = shm_fence;
1890
buffer->sync_fence = sync_fence;
1891
1892
draw->buffers[buf_id] = buffer;
1893
1894
return buffer;
1895
1896
no_image:
1897
xcb_sync_destroy_fence(draw->conn, sync_fence);
1898
xshmfence_unmap_shm(shm_fence);
1899
no_fence:
1900
free(buffer);
1901
no_buffer:
1902
return NULL;
1903
}
1904
1905
/** dri3_get_buffer
1906
*
1907
* Find a front or back buffer, allocating new ones as necessary
1908
*/
1909
static struct loader_dri3_buffer *
1910
dri3_get_buffer(__DRIdrawable *driDrawable,
1911
unsigned int format,
1912
enum loader_dri3_buffer_type buffer_type,
1913
struct loader_dri3_drawable *draw)
1914
{
1915
struct loader_dri3_buffer *buffer;
1916
bool fence_await = buffer_type == loader_dri3_buffer_back;
1917
int buf_id;
1918
1919
if (buffer_type == loader_dri3_buffer_back) {
1920
draw->back_format = format;
1921
1922
buf_id = dri3_find_back(draw);
1923
1924
if (buf_id < 0)
1925
return NULL;
1926
} else {
1927
buf_id = LOADER_DRI3_FRONT_ID;
1928
}
1929
1930
buffer = draw->buffers[buf_id];
1931
1932
/* Allocate a new buffer if there isn't an old one, if that
1933
* old one is the wrong size, or if it's suboptimal
1934
*/
1935
if (!buffer || buffer->width != draw->width ||
1936
buffer->height != draw->height ||
1937
buffer->reallocate) {
1938
struct loader_dri3_buffer *new_buffer;
1939
1940
/* Allocate the new buffers
1941
*/
1942
new_buffer = dri3_alloc_render_buffer(draw,
1943
format,
1944
draw->width,
1945
draw->height,
1946
draw->depth);
1947
if (!new_buffer)
1948
return NULL;
1949
1950
/* When resizing, copy the contents of the old buffer, waiting for that
1951
* copy to complete using our fences before proceeding
1952
*/
1953
if ((buffer_type == loader_dri3_buffer_back ||
1954
(buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
1955
&& buffer) {
1956
1957
/* Fill the new buffer with data from an old buffer */
1958
if (!loader_dri3_blit_image(draw,
1959
new_buffer->image,
1960
buffer->image,
1961
0, 0,
1962
MIN2(buffer->width, new_buffer->width),
1963
MIN2(buffer->height, new_buffer->height),
1964
0, 0, 0) &&
1965
!buffer->linear_buffer) {
1966
dri3_fence_reset(draw->conn, new_buffer);
1967
dri3_copy_area(draw->conn,
1968
buffer->pixmap,
1969
new_buffer->pixmap,
1970
dri3_drawable_gc(draw),
1971
0, 0, 0, 0,
1972
draw->width, draw->height);
1973
dri3_fence_trigger(draw->conn, new_buffer);
1974
fence_await = true;
1975
}
1976
dri3_free_render_buffer(draw, buffer);
1977
} else if (buffer_type == loader_dri3_buffer_front) {
1978
/* Fill the new fake front with data from a real front */
1979
loader_dri3_swapbuffer_barrier(draw);
1980
dri3_fence_reset(draw->conn, new_buffer);
1981
dri3_copy_area(draw->conn,
1982
draw->drawable,
1983
new_buffer->pixmap,
1984
dri3_drawable_gc(draw),
1985
0, 0, 0, 0,
1986
draw->width, draw->height);
1987
dri3_fence_trigger(draw->conn, new_buffer);
1988
1989
if (new_buffer->linear_buffer) {
1990
dri3_fence_await(draw->conn, draw, new_buffer);
1991
(void) loader_dri3_blit_image(draw,
1992
new_buffer->image,
1993
new_buffer->linear_buffer,
1994
0, 0, draw->width, draw->height,
1995
0, 0, 0);
1996
} else
1997
fence_await = true;
1998
}
1999
buffer = new_buffer;
2000
draw->buffers[buf_id] = buffer;
2001
}
2002
2003
if (fence_await)
2004
dri3_fence_await(draw->conn, draw, buffer);
2005
2006
/*
2007
* Do we need to preserve the content of a previous buffer?
2008
*
2009
* Note that this blit is needed only to avoid a wait for a buffer that
2010
* is currently in the flip chain or being scanned out from. That's really
2011
* a tradeoff. If we're ok with the wait we can reduce the number of back
2012
* buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
2013
* but in the latter case we must disallow page-flipping.
2014
*/
2015
if (buffer_type == loader_dri3_buffer_back &&
2016
draw->cur_blit_source != -1 &&
2017
draw->buffers[draw->cur_blit_source] &&
2018
buffer != draw->buffers[draw->cur_blit_source]) {
2019
2020
struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2021
2022
/* Avoid flushing here. Will propably do good for tiling hardware. */
2023
(void) loader_dri3_blit_image(draw,
2024
buffer->image,
2025
source->image,
2026
0, 0, draw->width, draw->height,
2027
0, 0, 0);
2028
buffer->last_swap = source->last_swap;
2029
draw->cur_blit_source = -1;
2030
}
2031
/* Return the requested buffer */
2032
return buffer;
2033
}
2034
2035
/** dri3_free_buffers
2036
*
2037
* Free the front bufffer or all of the back buffers. Used
2038
* when the application changes which buffers it needs
2039
*/
2040
static void
2041
dri3_free_buffers(__DRIdrawable *driDrawable,
2042
enum loader_dri3_buffer_type buffer_type,
2043
struct loader_dri3_drawable *draw)
2044
{
2045
struct loader_dri3_buffer *buffer;
2046
int first_id;
2047
int n_id;
2048
int buf_id;
2049
2050
switch (buffer_type) {
2051
case loader_dri3_buffer_back:
2052
first_id = LOADER_DRI3_BACK_ID(0);
2053
n_id = LOADER_DRI3_MAX_BACK;
2054
draw->cur_blit_source = -1;
2055
break;
2056
case loader_dri3_buffer_front:
2057
first_id = LOADER_DRI3_FRONT_ID;
2058
/* Don't free a fake front holding new backbuffer content. */
2059
n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2060
break;
2061
default:
2062
unreachable("unhandled buffer_type");
2063
}
2064
2065
for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
2066
buffer = draw->buffers[buf_id];
2067
if (buffer) {
2068
dri3_free_render_buffer(draw, buffer);
2069
draw->buffers[buf_id] = NULL;
2070
}
2071
}
2072
}
2073
2074
/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 *
 * \param driDrawable   DRI drawable the buffers belong to.
 * \param format        __DRI_IMAGE_FORMAT_* requested for new buffers.
 * \param stamp         Pointer the caller uses to detect invalidation;
 *                      stored on the drawable.
 * \param loaderPrivate Actually a struct loader_dri3_drawable *.
 * \param buffer_mask   __DRI_IMAGE_BUFFER_* bits the driver wants.
 * \param buffers       Out: image list filled with the buffers found.
 * \return true on success, false if the drawable update or an
 *         allocation failed.
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;
   int buf_id;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   /* Refresh cached drawable state from the server; fail if the
    * drawable is gone.
    */
   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_max_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = draw->cur_num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      /* Keep the slot that still holds pending blit-source content. */
      if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) {
         dri3_free_render_buffer(draw, draw->buffers[buf_id]);
         draw->buffers[buf_id] = NULL;
      }
   }

   /* pixmaps always have front buffers.
    * Exchange swaps also mandate fake front buffers.
    */
   if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front no longer wanted: drop it and the fake-front flag. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      /* Back no longer wanted: free all back buffers. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* The front is "fake" (client-side copy) unless it is a
       * same-gpu pixmap, which is the real server buffer.
       */
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   /* Remember where the driver keeps its invalidation stamp. */
   draw->stamp = stamp;

   return true;
}
2173
2174
/** loader_dri3_update_drawable_geometry
2175
*
2176
* Get the current drawable geometry.
2177
*/
2178
void
2179
loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2180
{
2181
xcb_get_geometry_cookie_t geom_cookie;
2182
xcb_get_geometry_reply_t *geom_reply;
2183
2184
geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2185
2186
geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2187
2188
if (geom_reply) {
2189
draw->width = geom_reply->width;
2190
draw->height = geom_reply->height;
2191
draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2192
draw->ext->flush->invalidate(draw->dri_drawable);
2193
2194
free(geom_reply);
2195
}
2196
}
2197
2198
2199
/**
 * Make sure the server has flushed all pending swap buffers to hardware
 * for this drawable. Ideally we'd want to send an X protocol request to
 * have the server block our connection until the swaps are complete. That
 * would avoid the potential round-trip here.
 */
void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
{
   int64_t ust = 0, msc = 0, sbc = 0;

   /* Waiting for SBC 0 blocks until every queued swap has completed;
    * the returned counters are not needed here.
    */
   (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
}
2212
2213
/**
2214
* Perform any cleanup associated with a close screen operation.
2215
* \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2216
*
2217
* This function destroys the screen's cached swap context if any.
2218
*/
2219
void
2220
loader_dri3_close_screen(__DRIscreen *dri_screen)
2221
{
2222
mtx_lock(&blit_context.mtx);
2223
if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2224
blit_context.core->destroyContext(blit_context.ctx);
2225
blit_context.ctx = NULL;
2226
}
2227
mtx_unlock(&blit_context.mtx);
2228
}
2229
2230
/**
2231
* Find a backbuffer slot - potentially allocating a back buffer
2232
*
2233
* \param draw[in,out] Pointer to the drawable for which to find back.
2234
* \return Pointer to a new back buffer or NULL if allocation failed or was
2235
* not mandated.
2236
*
2237
* Find a potentially new back buffer, and if it's not been allocated yet and
2238
* in addition needs initializing, then try to allocate and initialize it.
2239
*/
2240
#include <stdio.h>
2241
static struct loader_dri3_buffer *
2242
dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2243
{
2244
struct loader_dri3_buffer *back;
2245
int id;
2246
2247
id = dri3_find_back(draw);
2248
if (id < 0)
2249
return NULL;
2250
2251
back = draw->buffers[id];
2252
/* Allocate a new back if we haven't got one */
2253
if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
2254
dri3_update_drawable(draw))
2255
back = dri3_alloc_render_buffer(draw, draw->back_format,
2256
draw->width, draw->height, draw->depth);
2257
2258
if (!back)
2259
return NULL;
2260
2261
draw->buffers[id] = back;
2262
2263
/* If necessary, prefill the back with data according to swap_method mode. */
2264
if (draw->cur_blit_source != -1 &&
2265
draw->buffers[draw->cur_blit_source] &&
2266
back != draw->buffers[draw->cur_blit_source]) {
2267
struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2268
2269
dri3_fence_await(draw->conn, draw, source);
2270
dri3_fence_await(draw->conn, draw, back);
2271
(void) loader_dri3_blit_image(draw,
2272
back->image,
2273
source->image,
2274
0, 0, draw->width, draw->height,
2275
0, 0, 0);
2276
back->last_swap = source->last_swap;
2277
draw->cur_blit_source = -1;
2278
}
2279
2280
return back;
2281
}
2282
2283