GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/freedreno/freedreno_context.c
/*
 * Copyright (C) 2012 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "freedreno_context.h"
#include "ir3/ir3_cache.h"
#include "util/u_upload_mgr.h"
#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_fence.h"
#include "freedreno_gmem.h"
#include "freedreno_program.h"
#include "freedreno_query.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"

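/**
 * pipe_context::flush() implementation: flushes the current batch (if any),
 * re-using ctx->last_fence when there has been no rendering since the last
 * flush, and optionally returns a fence for the caller.
 */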
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
                 unsigned flags) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_fence_handle *fence = NULL;
   struct fd_batch *batch = NULL;

   /* We want to lookup current batch if it exists, but not create a new
    * one if not (unless we need a fence)
    */
   fd_batch_reference(&batch, ctx->batch);

   DBG("%p: flush: flags=%x, fencep=%p", batch, flags, fencep);

   if (fencep && !batch) {
      batch = fd_context_batch(ctx);
   } else if (!batch) {
      if (ctx->screen->reorder)
         fd_bc_flush(ctx, flags & PIPE_FLUSH_DEFERRED);
      fd_bc_dump(ctx, "%p: NULL batch, remaining:\n", ctx);
      return;
   }

   /* With TC_FLUSH_ASYNC, the fence will have been pre-created from
    * the front-end thread. But not yet associated with a batch,
    * because we cannot safely access ctx->batch outside of the driver
    * thread. So instead, replace the existing batch->fence with the
    * one created earlier
    */
   if ((flags & TC_FLUSH_ASYNC) && fencep) {
      /* We don't currently expect async+flush in the fence-fd
       * case.. for that to work properly we'd need TC to tell
       * us in the create_fence callback that it needs an fd.
       */
      assert(!(flags & PIPE_FLUSH_FENCE_FD));

      fd_fence_set_batch(*fencep, batch);
      fd_fence_ref(&batch->fence, *fencep);

      /* If we have nothing to flush, update the pre-created unflushed
       * fence with the current state of the last-fence:
       */
      if (ctx->last_fence) {
         fd_fence_repopulate(*fencep, ctx->last_fence);
         fd_fence_ref(&fence, *fencep);
         fd_bc_dump(ctx, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
         goto out;
      }

      /* async flush is not compatible with deferred flush, since
       * nothing triggers the batch flush which fence_flush() would
       * be waiting for
       */
      flags &= ~PIPE_FLUSH_DEFERRED;
   } else if (!batch->fence) {
      batch->fence = fd_fence_create(batch);
   }

   /* In some sequence of events, we can end up with a last_fence that is
    * not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
    * errors.
    */
   if ((flags & PIPE_FLUSH_FENCE_FD) && ctx->last_fence &&
       !fd_fence_is_fd(ctx->last_fence))
      fd_fence_ref(&ctx->last_fence, NULL);

   /* if no rendering since last flush, ie. app just decided it needed
    * a fence, re-use the last one:
    */
   if (ctx->last_fence) {
      fd_fence_ref(&fence, ctx->last_fence);
      fd_bc_dump(ctx, "%p: reuse last_fence, remaining:\n", ctx);
      goto out;
   }

   /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
   fd_fence_ref(&fence, batch->fence);

   if (flags & PIPE_FLUSH_FENCE_FD)
      fence->submit_fence.use_fence_fd = true;

   fd_bc_dump(ctx, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
              batch, batch->seqno, flags);

   /* If we get here, we need to flush for a fence, even if there is
    * no rendering yet:
    */
   batch->needs_flush = true;

   if (!ctx->screen->reorder) {
      fd_batch_flush(batch);
   } else {
      fd_bc_flush(ctx, flags & PIPE_FLUSH_DEFERRED);
   }

   fd_bc_dump(ctx, "%p: remaining:\n", ctx);

out:
   if (fencep)
      fd_fence_ref(fencep, fence);

   fd_fence_ref(&ctx->last_fence, fence);

   fd_fence_ref(&fence, NULL);

   fd_batch_reference(&batch, NULL);

   u_trace_context_process(&ctx->trace_context,
                           !!(flags & PIPE_FLUSH_END_OF_FRAME));
}

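/**
 * pipe_context::texture_barrier() implementation: use the per-generation
 * framebuffer_barrier hook when one is installed, otherwise fall back to a
 * full context flush.
 */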
static void
fd_texture_barrier(struct pipe_context *pctx, unsigned flags) in_dt
{
   if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
      struct fd_context *ctx = fd_context(pctx);

      if (ctx->framebuffer_barrier) {
         ctx->framebuffer_barrier(ctx);
         return;
      }
   }

   /* On devices that could sample from GMEM we could possibly do better.
    * Or if we knew that we were doing GMEM bypass we could just emit a
    * cache flush, perhaps? But we don't know if future draws would cause
    * us to use GMEM, and a flush in bypass isn't the end of the world.
    */
   fd_context_flush(pctx, NULL, 0);
}

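/**
 * pipe_context::memory_barrier() implementation: anything other than a
 * pure PIPE_BARRIER_UPDATE currently results in a full context flush.
 */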
static void
fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   fd_context_flush(pctx, NULL, 0);

   /* TODO do we need to check for persistently mapped buffers and
    * fd_bo_cpu_prep()??
    */
}

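/* Write the string payload into the ring as dwords, zero-padding the final
 * partial word so we never read past the end of the input string.
 */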
static void
emit_string_tail(struct fd_ringbuffer *ring, const char *string, int len)
{
   const uint32_t *buf = (const void *)string;

   while (len >= 4) {
      OUT_RING(ring, *buf);
      buf++;
      len -= 4;
   }

   /* copy remainder bytes without reading past end of input string: */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      OUT_RING(ring, w);
   }
}

/* for prior to a5xx: */
void
fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len)
{
   /* max packet size is 0x3fff+1 dwords: */
   len = MIN2(len, 0x4000 * 4);

   OUT_PKT3(ring, CP_NOP, align(len, 4) / 4);
   emit_string_tail(ring, string, len);
}

/* for a5xx+ */
void
fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len)
{
   /* max packet size is 0x3fff dwords: */
   len = MIN2(len, 0x3fff * 4);

   OUT_PKT7(ring, CP_NOP, align(len, 4) / 4);
   emit_string_tail(ring, string, len);
}

/**
 * emit marker string as payload of a no-op packet, which can be
 * decoded by cffdump.
 */
static void
fd_emit_string_marker(struct pipe_context *pctx, const char *string,
                      int len) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   DBG("%.*s", len, string);

   if (!ctx->batch)
      return;

   struct fd_batch *batch = fd_context_batch_locked(ctx);

   fd_batch_needs_flush(batch);

   if (ctx->screen->gpu_id >= 500) {
      fd_emit_string5(batch->draw, string, len);
   } else {
      fd_emit_string(batch->draw, string, len);
   }

   fd_batch_unlock_submit(batch);
   fd_batch_reference(&batch, NULL);
}

/**
 * If we have a pending fence_server_sync() (GPU side sync), flush now.
 * The alternative to try to track this with batch dependencies gets
 * hairy quickly.
 *
 * Call this before switching to a different batch, to handle this case.
 */
void
fd_context_switch_from(struct fd_context *ctx)
{
   if (ctx->batch && (ctx->batch->in_fence_fd != -1))
      fd_batch_flush(ctx->batch);
}

/**
 * If there is a pending fence-fd that we need to sync on, this will
 * transfer the reference to the next batch we are going to render
 * to.
 */
void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
   if (ctx->in_fence_fd != -1) {
      sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
      close(ctx->in_fence_fd);
      ctx->in_fence_fd = -1;
   }
}

/**
 * Return a reference to the current batch, caller must unref.
 */
struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
   struct fd_batch *batch = NULL;

   tc_assert_driver_thread(ctx->tc);

   fd_batch_reference(&batch, ctx->batch);

   if (unlikely(!batch)) {
      batch = fd_batch_from_fb(ctx, &ctx->framebuffer);
      util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
      fd_batch_reference(&ctx->batch, batch);
      fd_context_all_dirty(ctx);
   }
   fd_context_switch_to(ctx, batch);

   return batch;
}

/**
 * Return a locked reference to the current batch. A batch with emit
 * lock held is protected against flushing while the lock is held.
 * The emit-lock should be acquired before screen-lock. The emit-lock
 * should be held while emitting cmdstream.
 */
struct fd_batch *
fd_context_batch_locked(struct fd_context *ctx)
{
   struct fd_batch *batch = NULL;

   while (!batch) {
      batch = fd_context_batch(ctx);
      if (!fd_batch_lock_submit(batch)) {
         fd_batch_reference(&batch, NULL);
      }
   }

   return batch;
}

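/**
 * Destroy the common part of the context: release the current batch and
 * fences, per-context BOs, transfer pools, blitter/primconvert helpers,
 * and the kernel pipe/device handles.
 */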
void
fd_context_destroy(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   unsigned i;

   DBG("");

   fd_screen_lock(ctx->screen);
   list_del(&ctx->node);
   fd_screen_unlock(ctx->screen);

   fd_fence_ref(&ctx->last_fence, NULL);

   if (ctx->in_fence_fd != -1)
      close(ctx->in_fence_fd);

   for (i = 0; i < ARRAY_SIZE(ctx->pvtmem); i++) {
      if (ctx->pvtmem[i].bo)
         fd_bo_del(ctx->pvtmem[i].bo);
   }

   util_copy_framebuffer_state(&ctx->framebuffer, NULL);
   fd_batch_reference(&ctx->batch, NULL); /* unref current batch */

   /* Make sure nothing in the batch cache references our context any more. */
   fd_bc_flush(ctx, false);

   fd_prog_fini(pctx);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   for (i = 0; i < ARRAY_SIZE(ctx->clear_rs_state); i++)
      if (ctx->clear_rs_state[i])
         pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state[i]);

   if (ctx->primconvert)
      util_primconvert_destroy(ctx->primconvert);

   slab_destroy_child(&ctx->transfer_pool);
   slab_destroy_child(&ctx->transfer_pool_unsync);

   for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe_bo); i++) {
      if (!ctx->vsc_pipe_bo[i])
         break;
      fd_bo_del(ctx->vsc_pipe_bo[i]);
   }

   fd_device_del(ctx->dev);
   fd_pipe_purge(ctx->pipe);
   fd_pipe_del(ctx->pipe);

   simple_mtx_destroy(&ctx->gmem_lock);

   u_trace_context_fini(&ctx->trace_context);

   fd_autotune_fini(&ctx->autotune);

   ir3_cache_destroy(ctx->shader_cache);

   if (FD_DBG(BSTAT) || FD_DBG(MSGS)) {
      mesa_logi(
         "batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, "
         "batch_restore=%u\n",
         (uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem,
         (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
         (uint32_t)ctx->stats.batch_restore);
   }
}

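/**
 * pipe_context::set_debug_callback() implementation: stash (or clear) the
 * frontend's debug callback so the driver can forward messages to it.
 */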
static void
fd_set_debug_callback(struct pipe_context *pctx,
                      const struct pipe_debug_callback *cb)
{
   struct fd_context *ctx = fd_context(pctx);

   if (cb)
      ctx->debug = *cb;
   else
      memset(&ctx->debug, 0, sizeof(ctx->debug));
}

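/* Query the kernel for the per-context or global GPU fault count, used to
 * implement the reset-status query below.
 */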
static uint32_t
fd_get_reset_count(struct fd_context *ctx, bool per_context)
{
   uint64_t val;
   enum fd_param_id param = per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
   int ret = fd_pipe_get_param(ctx->pipe, param, &val);
   debug_assert(!ret);
   return val;
}

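/**
 * pipe_context::get_device_reset_status() implementation: compare the
 * current per-context and global fault counts against the previously
 * cached values to classify a reset as guilty, innocent, or none.
 */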
static enum pipe_reset_status
fd_get_device_reset_status(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   int context_faults = fd_get_reset_count(ctx, true);
   int global_faults = fd_get_reset_count(ctx, false);
   enum pipe_reset_status status;

   /* Not called in driver thread, but threaded_context syncs
    * before calling this:
    */
   fd_context_access_begin(ctx);

   if (context_faults != ctx->context_reset_count) {
      status = PIPE_GUILTY_CONTEXT_RESET;
   } else if (global_faults != ctx->global_reset_count) {
      status = PIPE_INNOCENT_CONTEXT_RESET;
   } else {
      status = PIPE_NO_RESET;
   }

   ctx->context_reset_count = context_faults;
   ctx->global_reset_count = global_faults;

   fd_context_access_end(ctx);

   return status;
}

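/* u_trace hook: emit a command to record a GPU timestamp into the
 * timestamps buffer at the given index, storing U_TRACE_NO_TIMESTAMP from
 * the CPU when no commands have been emitted since the previous timestamp.
 */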
static void
fd_trace_record_ts(struct u_trace *ut, struct pipe_resource *timestamps,
                   unsigned idx)
{
   struct fd_batch *batch = container_of(ut, struct fd_batch, trace);
   struct fd_ringbuffer *ring = batch->nondraw ? batch->draw : batch->gmem;

   if (ring->cur == batch->last_timestamp_cmd) {
      uint64_t *ts = fd_bo_map(fd_resource(timestamps)->bo);
      ts[idx] = U_TRACE_NO_TIMESTAMP;
      return;
   }

   unsigned ts_offset = idx * sizeof(uint64_t);
   batch->ctx->record_timestamp(ring, fd_resource(timestamps)->bo, ts_offset);
   batch->last_timestamp_cmd = ring->cur;
}

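/* u_trace hook: read back a recorded GPU timestamp, stalling on the GPU
 * only for the first entry, and convert it from GPU ticks to nanoseconds.
 */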
static uint64_t
fd_trace_read_ts(struct u_trace_context *utctx,
                 struct pipe_resource *timestamps, unsigned idx)
{
   struct fd_context *ctx =
      container_of(utctx, struct fd_context, trace_context);
   struct fd_bo *ts_bo = fd_resource(timestamps)->bo;

   /* Only need to stall on results for the first entry: */
   if (idx == 0) {
      /* Avoid triggering deferred submits from flushing, since that
       * changes the behavior of what we are trying to measure:
       */
      while (fd_bo_cpu_prep(ts_bo, ctx->pipe, FD_BO_PREP_NOSYNC))
         usleep(10000);
      int ret = fd_bo_cpu_prep(ts_bo, ctx->pipe, FD_BO_PREP_READ);
      if (ret)
         return U_TRACE_NO_TIMESTAMP;
   }

   uint64_t *ts = fd_bo_map(ts_bo);

   /* Don't translate the no-timestamp marker: */
   if (ts[idx] == U_TRACE_NO_TIMESTAMP)
      return U_TRACE_NO_TIMESTAMP;

   return ctx->ts_to_ns(ts[idx]);
}

/* TODO we could combine a few of these small buffers (solid_vbuf,
 * blit_texcoord_vbuf, and vsc_size_mem) into a single buffer and
 * save a tiny bit of memory
 */

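/* Create a small immutable vertex buffer containing the constant vertex
 * positions referenced by solid_vbuf_state below.
 */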
static struct pipe_resource *
create_solid_vertexbuf(struct pipe_context *pctx)
{
   static const float init_shader_const[] = {
      -1.000000, +1.000000, +1.000000, +1.000000, -1.000000, +1.000000,
   };
   struct pipe_resource *prsc =
      pipe_buffer_create(pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                         sizeof(init_shader_const));
   pipe_buffer_write(pctx, prsc, 0, sizeof(init_shader_const),
                     init_shader_const);
   return prsc;
}

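/* Create a small dynamic vertex buffer used for blit texture coordinates. */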
static struct pipe_resource *
create_blit_texcoord_vertexbuf(struct pipe_context *pctx)
{
   struct pipe_resource *prsc = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_DYNAMIC, 16);
   return prsc;
}

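/**
 * Create the vertex buffers and vertex-element state shared by the
 * internal solid and blit draw paths.
 */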
void
fd_context_setup_common_vbos(struct fd_context *ctx)
{
   struct pipe_context *pctx = &ctx->base;

   ctx->solid_vbuf = create_solid_vertexbuf(pctx);
   ctx->blit_texcoord_vbuf = create_blit_texcoord_vertexbuf(pctx);

   /* setup solid_vbuf_state: */
   ctx->solid_vbuf_state.vtx = pctx->create_vertex_elements_state(
      pctx, 1,
      (struct pipe_vertex_element[]){{
         .vertex_buffer_index = 0,
         .src_offset = 0,
         .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
      }});
   ctx->solid_vbuf_state.vertexbuf.count = 1;
   ctx->solid_vbuf_state.vertexbuf.vb[0].stride = 12;
   ctx->solid_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->solid_vbuf;

   /* setup blit_vbuf_state: */
   ctx->blit_vbuf_state.vtx = pctx->create_vertex_elements_state(
      pctx, 2,
      (struct pipe_vertex_element[]){
         {
            .vertex_buffer_index = 0,
            .src_offset = 0,
            .src_format = PIPE_FORMAT_R32G32_FLOAT,
         },
         {
            .vertex_buffer_index = 1,
            .src_offset = 0,
            .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
         }});
   ctx->blit_vbuf_state.vertexbuf.count = 2;
   ctx->blit_vbuf_state.vertexbuf.vb[0].stride = 8;
   ctx->blit_vbuf_state.vertexbuf.vb[0].buffer.resource =
      ctx->blit_texcoord_vbuf;
   ctx->blit_vbuf_state.vertexbuf.vb[1].stride = 12;
   ctx->blit_vbuf_state.vertexbuf.vb[1].buffer.resource = ctx->solid_vbuf;
}

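/**
 * Release the vertex buffers and vertex-element state created by
 * fd_context_setup_common_vbos().
 */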
void
fd_context_cleanup_common_vbos(struct fd_context *ctx)
{
   struct pipe_context *pctx = &ctx->base;

   pctx->delete_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
   pctx->delete_vertex_elements_state(pctx, ctx->blit_vbuf_state.vtx);

   pipe_resource_reference(&ctx->solid_vbuf, NULL);
   pipe_resource_reference(&ctx->blit_texcoord_vbuf, NULL);
}

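/**
 * Initialize the generation-independent parts of the context: the kernel
 * pipe, uploaders, transfer pools, blitter/primconvert helpers, tracing,
 * and the core pipe_context hooks.
 */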
struct pipe_context *
fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
                const uint8_t *primtypes, void *priv,
                unsigned flags) disable_thread_safety_analysis
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct pipe_context *pctx;
   unsigned prio = 1;
   int i;

   /* lower numerical value == higher priority: */
   if (FD_DBG(HIPRIO))
      prio = 0;
   else if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      prio = 0;
   else if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      prio = 2;

   /* Some of the stats will get printed out at context destroy, so
    * make sure they are collected:
    */
   if (FD_DBG(BSTAT) || FD_DBG(MSGS))
      ctx->stats_users++;

   ctx->screen = screen;
   ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);

   ctx->in_fence_fd = -1;

   if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
      ctx->context_reset_count = fd_get_reset_count(ctx, true);
      ctx->global_reset_count = fd_get_reset_count(ctx, false);
   }

   ctx->primtypes = primtypes;
   ctx->primtype_mask = 0;
   for (i = 0; i <= PIPE_PRIM_MAX; i++)
      if (primtypes[i])
         ctx->primtype_mask |= (1 << i);

   simple_mtx_init(&ctx->gmem_lock, mtx_plain);

   /* need some sane default in case gallium frontends don't
    * set some state:
    */
   ctx->sample_mask = 0xffff;
   ctx->active_queries = true;

   pctx = &ctx->base;
   pctx->screen = pscreen;
   pctx->priv = priv;
   pctx->flush = fd_context_flush;
   pctx->emit_string_marker = fd_emit_string_marker;
   pctx->set_debug_callback = fd_set_debug_callback;
   pctx->get_device_reset_status = fd_get_device_reset_status;
   pctx->create_fence_fd = fd_create_fence_fd;
   pctx->fence_server_sync = fd_fence_server_sync;
   pctx->fence_server_signal = fd_fence_server_signal;
   pctx->texture_barrier = fd_texture_barrier;
   pctx->memory_barrier = fd_memory_barrier;

   pctx->stream_uploader = u_upload_create_default(pctx);
   if (!pctx->stream_uploader)
      goto fail;
   pctx->const_uploader = pctx->stream_uploader;

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ctx->transfer_pool_unsync, &screen->transfer_pool);

   fd_draw_init(pctx);
   fd_resource_context_init(pctx);
   fd_query_context_init(pctx);
   fd_texture_init(pctx);
   fd_state_init(pctx);

   ctx->blitter = util_blitter_create(pctx);
   if (!ctx->blitter)
      goto fail;

   ctx->primconvert = util_primconvert_create(pctx, ctx->primtype_mask);
   if (!ctx->primconvert)
      goto fail;

   list_inithead(&ctx->hw_active_queries);
   list_inithead(&ctx->acc_active_queries);

   fd_screen_lock(ctx->screen);
   ctx->seqno = ++screen->ctx_seqno;
   list_add(&ctx->node, &ctx->screen->context_list);
   fd_screen_unlock(ctx->screen);

   ctx->current_scissor = &ctx->disabled_scissor;

   u_trace_context_init(&ctx->trace_context, pctx, fd_trace_record_ts,
                        fd_trace_read_ts);

   fd_autotune_init(&ctx->autotune, screen->dev);

   return pctx;

fail:
   pctx->destroy(pctx);
   return NULL;
}

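/**
 * Optionally wrap the context in a threaded_context when the frontend asks
 * for PIPE_CONTEXT_PREFER_THREADED and the context is not compute-only.
 */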
struct pipe_context *
fd_context_init_tc(struct pipe_context *pctx, unsigned flags)
{
   struct fd_context *ctx = fd_context(pctx);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return pctx;

   /* Clover (compute-only) is unsupported. */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return pctx;

   struct pipe_context *tc = threaded_context_create(
      pctx, &ctx->screen->transfer_pool,
      fd_replace_buffer_storage,
      fd_fence_create_unflushed,
      fd_resource_busy,
      false,
      &ctx->tc);

   uint64_t total_ram;
   if (tc && tc != pctx && os_get_total_physical_memory(&total_ram)) {
      ((struct threaded_context *)tc)->bytes_mapped_limit = total_ram / 16;
   }

   return tc;
}