GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/llvmpipe/lp_setup.c
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Tiling engine.
 *
 * Builds per-tile display lists and executes them on calls to
 * lp_setup_flush().
 */

#include <limits.h>

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_viewport.h"
#include "draw/draw_pipe.h"
#include "util/os_time.h"
#include "lp_context.h"
#include "lp_memory.h"
#include "lp_scene.h"
#include "lp_texture.h"
#include "lp_debug.h"
#include "lp_fence.h"
#include "lp_query.h"
#include "lp_rast.h"
#include "lp_setup_context.h"
#include "lp_screen.h"
#include "lp_state.h"
#include "frontend/sw_winsys.h"

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"

static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
                                const char *reason);
static boolean try_update_scene_state( struct lp_setup_context *setup );

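/**
 * Rotate to the next scene in the ring of pre-allocated scenes and make it
 * current, first waiting on its fence if the rasterizer is still using it.
 */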
static void
lp_setup_get_empty_scene(struct lp_setup_context *setup)
{
   assert(setup->scene == NULL);

   setup->scene_idx++;
   setup->scene_idx %= ARRAY_SIZE(setup->scenes);

   setup->scene = setup->scenes[setup->scene_idx];

   if (setup->scene->fence) {
      if (LP_DEBUG & DEBUG_SETUP)
         debug_printf("%s: wait for scene %d\n",
                      __FUNCTION__, setup->scene->fence->id);

      lp_fence_wait(setup->scene->fence);
   }

   lp_scene_begin_binning(setup->scene, &setup->fb);

}

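/*
 * The first_triangle/first_line/first_point callbacks below are installed
 * by lp_setup_reset(); on the first primitive after a reset they select the
 * appropriate specialized rasterization path via lp_setup_choose_*() and
 * then forward the primitive to it.
 */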
static void
first_triangle( struct lp_setup_context *setup,
                const float (*v0)[4],
                const float (*v1)[4],
                const float (*v2)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_triangle( setup );
   setup->triangle( setup, v0, v1, v2 );
}

static void
first_line( struct lp_setup_context *setup,
            const float (*v0)[4],
            const float (*v1)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_line( setup );
   setup->line( setup, v0, v1 );
}

static void
first_point( struct lp_setup_context *setup,
             const float (*v0)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_point( setup );
   setup->point( setup, v0 );
}

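/**
 * Reset derived and stored state, forget the current scene and reinstall
 * the first_*() primitive callbacks.
 */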
void lp_setup_reset( struct lp_setup_context *setup )
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Reset derived state */
   for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
      setup->constants[i].stored_size = 0;
      setup->constants[i].stored_data = NULL;
   }

   setup->fs.stored = NULL;
   setup->dirty = ~0;

   /* no current bin */
   setup->scene = NULL;

   /* Reset some state:
    */
   memset(&setup->clear, 0, sizeof setup->clear);

   /* Have an explicit "start-binning" call and get rid of this
    * pointer twiddling?
    */
   setup->line = first_line;
   setup->point = first_point;
   setup->triangle = first_triangle;
}

/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   scene->num_active_queries = setup->active_binned_queries;
   memcpy(scene->active_queries, setup->active_queries,
          scene->num_active_queries * sizeof(scene->active_queries[0]));

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   mtx_lock(&screen->rast_mutex);

   /* FIXME: We enqueue the scene then wait on the rasterizer to finish.
    * This means we never actually run any vertex stuff in parallel to
    * rasterization (not in the same context at least) which is what the
    * multiple scenes per setup is about - when we get a new empty scene
    * any old one is already empty again because we waited here for
    * raster tasks to be finished. Ideally, we shouldn't need to wait here
    * and rely on fences elsewhere when waiting is necessary.
    * Certainly, lp_scene_end_rasterization() would need to be deferred too
    * and there's probably other bits why this doesn't actually work.
    */
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   mtx_unlock(&screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}

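/**
 * Start binning a new scene: create its fence, store the current state and
 * bin the accumulated full-surface clear commands into every tile, then
 * reset the pending-clear flags.
 */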
static boolean
begin_binning( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   boolean need_zsload = FALSE;
   boolean ok;

   assert(scene);
   assert(scene->fence == NULL);

   /* Always create a fence:
    */
   scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
   if (!scene->fence)
      return FALSE;

   ok = try_update_scene_state(setup);
   if (!ok)
      return FALSE;

   if (setup->fb.zsbuf &&
       ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
      need_zsload = TRUE;

   LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
          setup->clear.flags >> 2,
          need_zsload ? "clear": "load");

   if (setup->clear.flags & PIPE_CLEAR_COLOR) {
      unsigned cbuf;
      for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
         assert(PIPE_CLEAR_COLOR0 == 1 << 2);
         if (setup->clear.flags & (1 << (2 + cbuf))) {
            union lp_rast_cmd_arg clearrb_arg;
            struct lp_rast_clear_rb *cc_scene =
               (struct lp_rast_clear_rb *)
                  lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));

            if (!cc_scene) {
               return FALSE;
            }

            cc_scene->cbuf = cbuf;
            cc_scene->color_val = setup->clear.color_val[cbuf];
            clearrb_arg.clear_rb = cc_scene;

            if (!lp_scene_bin_everywhere(scene,
                                         LP_RAST_OP_CLEAR_COLOR,
                                         clearrb_arg))
               return FALSE;
         }
      }
   }

   if (setup->fb.zsbuf) {
      if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
         ok = lp_scene_bin_everywhere( scene,
                                       LP_RAST_OP_CLEAR_ZSTENCIL,
                                       lp_rast_arg_clearzs(
                                          setup->clear.zsvalue,
                                          setup->clear.zsmask));
         if (!ok)
            return FALSE;
      }
   }

   setup->clear.flags = 0;
   setup->clear.zsmask = 0;
   setup->clear.zsvalue = 0;

   scene->had_queries = !!setup->active_binned_queries;

   LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
   return TRUE;
}

/* This basically bins and then flushes any outstanding full-screen
 * clears.
 *
 * TODO: fast path for fullscreen clears and no triangles.
 */
static boolean
execute_clears( struct lp_setup_context *setup )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   return begin_binning( setup );
}

const char *states[] = {
   "FLUSHED",
   "CLEARED",
   "ACTIVE "
};

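/**
 * Drive the FLUSHED -> CLEARED -> ACTIVE state machine. Entering ACTIVE
 * begins binning, and entering FLUSHED executes any pending clears and
 * rasterizes the current scene. On failure the setup context is reset to
 * the FLUSHED state and FALSE is returned.
 */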
static boolean
set_scene_state( struct lp_setup_context *setup,
                 enum setup_state new_state,
                 const char *reason)
{
   unsigned old_state = setup->state;

   if (old_state == new_state)
      return TRUE;

   if (LP_DEBUG & DEBUG_SCENE) {
      debug_printf("%s old %s new %s%s%s\n",
                   __FUNCTION__,
                   states[old_state],
                   states[new_state],
                   (new_state == SETUP_FLUSHED) ? ": " : "",
                   (new_state == SETUP_FLUSHED) ? reason : "");

      if (new_state == SETUP_FLUSHED && setup->scene)
         lp_debug_draw_bins_by_cmd_length(setup->scene);
   }

   /* wait for a free/empty scene
    */
   if (old_state == SETUP_FLUSHED)
      lp_setup_get_empty_scene(setup);

   switch (new_state) {
   case SETUP_CLEARED:
      break;

   case SETUP_ACTIVE:
      if (!begin_binning( setup ))
         goto fail;
      break;

   case SETUP_FLUSHED:
      if (old_state == SETUP_CLEARED)
         if (!execute_clears( setup ))
            goto fail;

      lp_setup_rasterize_scene( setup );
      assert(setup->scene == NULL);
      break;

   default:
      assert(0 && "invalid setup state mode");
      goto fail;
   }

   setup->state = new_state;
   return TRUE;

fail:
   if (setup->scene) {
      lp_scene_end_rasterization(setup->scene);
      setup->scene = NULL;
   }

   setup->state = SETUP_FLUSHED;
   lp_setup_reset( setup );
   return FALSE;
}

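/**
 * Flush the current scene. If a fence pointer is given, return a fence for
 * everything flushed so far; an empty fence is created if nothing was ever
 * issued, so callers always get a valid handle.
 */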
void
lp_setup_flush( struct lp_setup_context *setup,
                struct pipe_fence_handle **fence,
                const char *reason)
{
   set_scene_state( setup, SETUP_FLUSHED, reason );

   if (fence) {
      lp_fence_reference((struct lp_fence **)fence, setup->last_fence);
      if (!*fence)
         *fence = (struct pipe_fence_handle *)lp_fence_create(0);
   }
}

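/**
 * Bind a new framebuffer: flush any old scene first, then record the new
 * framebuffer state and the draw region it implies.
 */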
void
lp_setup_bind_framebuffer( struct lp_setup_context *setup,
                           const struct pipe_framebuffer_state *fb )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Flush any old scene.
    */
   set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );

   /*
    * Ensure the old scene is not reused.
    */
   assert(!setup->scene);

   /* Set new state. This will be picked up later when we next need a
    * scene.
    */
   util_copy_framebuffer_state(&setup->fb, fb);
   setup->framebuffer.x0 = 0;
   setup->framebuffer.y0 = 0;
   setup->framebuffer.x1 = fb->width-1;
   setup->framebuffer.y1 = fb->height-1;
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}

/*
 * Try to clear one color buffer of the attached fb, either by binning a clear
 * command or queuing up the clear for later (when binning is started).
 */
static boolean
lp_setup_try_clear_color_buffer(struct lp_setup_context *setup,
                                const union pipe_color_union *color,
                                unsigned cbuf)
{
   union lp_rast_cmd_arg clearrb_arg;
   union util_color uc;
   enum pipe_format format = setup->fb.cbufs[cbuf]->format;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   util_pack_color_union(format, &uc, color);

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene. In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      struct lp_rast_clear_rb *cc_scene =
         (struct lp_rast_clear_rb *)
            lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);

      if (!cc_scene) {
         return FALSE;
      }

      cc_scene->cbuf = cbuf;
      cc_scene->color_val = uc;
      clearrb_arg.clear_rb = cc_scene;

      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_COLOR,
                                   clearrb_arg))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or gallium frontend might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      setup->clear.flags |= 1 << (cbuf + 2);
      setup->clear.color_val[cbuf] = uc;
   }

   return TRUE;
}

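/*
 * Try to clear the depth/stencil buffer, mirroring the color-buffer path
 * above: bin the clear if a scene is already active, otherwise accumulate
 * the packed value and write mask in setup->clear for when binning starts.
 */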
static boolean
lp_setup_try_clear_zs(struct lp_setup_context *setup,
                      double depth,
                      unsigned stencil,
                      unsigned flags)
{
   uint64_t zsmask = 0;
   uint64_t zsvalue = 0;
   uint32_t zmask32;
   uint8_t smask8;
   enum pipe_format format = setup->fb.zsbuf->format;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
   smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;

   zsvalue = util_pack64_z_stencil(format, depth, stencil);

   zsmask = util_pack64_mask_z_stencil(format, zmask32, smask8);

   zsvalue &= zsmask;

   if (format == PIPE_FORMAT_Z24X8_UNORM ||
       format == PIPE_FORMAT_X8Z24_UNORM) {
      /*
       * Make full mask if there's "X" bits so we can do full
       * clear (without rmw).
       */
      uint32_t zsmask_full = 0;
      zsmask_full = util_pack_mask_z_stencil(format, ~0, ~0);
      zsmask |= ~zsmask_full;
   }

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene. In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_ZSTENCIL,
                                   lp_rast_arg_clearzs(zsvalue, zsmask)))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or gallium frontend might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      setup->clear.flags |= flags;

      setup->clear.zsmask |= zsmask;
      setup->clear.zsvalue =
         (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
   }

   return TRUE;
}

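/**
 * Gallium clear entry point: dispatch the requested depth/stencil and
 * per-colorbuffer clears, flushing and retrying once if binning a clear
 * command fails.
 */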
void
lp_setup_clear( struct lp_setup_context *setup,
                const union pipe_color_union *color,
                double depth,
                unsigned stencil,
                unsigned flags )
{
   unsigned i;

   /*
    * Note any of these (max 9) clears could fail (but at most there should
    * be just one failure!). This avoids doing the previous succeeded
    * clears again (we still clear tiles twice if a clear command succeeded
    * partially for one buffer).
    */
   if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
      unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
      if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
         lp_setup_flush(setup, NULL, __FUNCTION__);

         if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
            assert(0);
      }
   }

   if (flags & PIPE_CLEAR_COLOR) {
      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      for (i = 0; i < setup->fb.nr_cbufs; i++) {
         if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
            if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
               lp_setup_flush(setup, NULL, __FUNCTION__);

               if (!lp_setup_try_clear_color_buffer(setup, color, i))
                  assert(0);
            }
         }
      }
   }
}

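/*
 * The lp_setup_set_*() functions below latch rasterizer, shader and
 * resource state into the setup context and, where needed, flag the
 * corresponding LP_SETUP_NEW_* dirty bits; the dirty state is stored into
 * the scene later by try_update_scene_state().
 */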
void
lp_setup_set_triangle_state( struct lp_setup_context *setup,
                             unsigned cull_mode,
                             boolean ccw_is_frontface,
                             boolean scissor,
                             boolean half_pixel_center,
                             boolean bottom_edge_rule,
                             boolean multisample)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->ccw_is_frontface = ccw_is_frontface;
   setup->cullmode = cull_mode;
   setup->triangle = first_triangle;
   setup->multisample = multisample;
   setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
   setup->bottom_edge_rule = bottom_edge_rule;

   if (setup->scissor_test != scissor) {
      setup->dirty |= LP_SETUP_NEW_SCISSOR;
      setup->scissor_test = scissor;
   }
}

void
lp_setup_set_line_state( struct lp_setup_context *setup,
                         float line_width,
                         boolean line_rectangular)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->line_width = line_width;
   setup->rectangular_lines = line_rectangular;
}

void
lp_setup_set_point_state( struct lp_setup_context *setup,
                          float point_size,
                          boolean point_size_per_vertex,
                          uint sprite_coord_enable,
                          uint sprite_coord_origin,
                          boolean point_quad_rasterization)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->point_size = point_size;
   setup->sprite_coord_enable = sprite_coord_enable;
   setup->sprite_coord_origin = sprite_coord_origin;
   setup->point_size_per_vertex = point_size_per_vertex;
   setup->legacy_points = !point_quad_rasterization;
}

void
lp_setup_set_setup_variant( struct lp_setup_context *setup,
                            const struct lp_setup_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->setup.variant = variant;
}

void
lp_setup_set_fs_variant( struct lp_setup_context *setup,
                         struct lp_fragment_shader_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
          variant);

   setup->fs.current.variant = variant;
   setup->dirty |= LP_SETUP_NEW_FS;
}

void
lp_setup_set_fs_constants(struct lp_setup_context *setup,
                          unsigned num,
                          struct pipe_constant_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= ARRAY_SIZE(setup->constants));

   for (i = 0; i < num; ++i) {
      util_copy_constant_buffer(&setup->constants[i].current, &buffers[i], false);
   }
   for (; i < ARRAY_SIZE(setup->constants); i++) {
      util_copy_constant_buffer(&setup->constants[i].current, NULL, false);
   }
   setup->dirty |= LP_SETUP_NEW_CONSTANTS;
}

void
lp_setup_set_fs_ssbos(struct lp_setup_context *setup,
                      unsigned num,
                      struct pipe_shader_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= ARRAY_SIZE(setup->ssbos));

   for (i = 0; i < num; ++i) {
      util_copy_shader_buffer(&setup->ssbos[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(setup->ssbos); i++) {
      util_copy_shader_buffer(&setup->ssbos[i].current, NULL);
   }
   setup->dirty |= LP_SETUP_NEW_SSBOS;
}

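/**
 * Bind fragment shader images and fill in the per-image lp_jit_image
 * descriptors (base pointer, dimensions and strides) consumed by the JITed
 * fragment shaders.
 */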
void
lp_setup_set_fs_images(struct lp_setup_context *setup,
                       unsigned num,
                       struct pipe_image_view *images)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) images);

   assert(num <= ARRAY_SIZE(setup->images));

   for (i = 0; i < num; ++i) {
      struct pipe_image_view *image = &images[i];
      util_copy_image_view(&setup->images[i].current, &images[i]);

      struct pipe_resource *res = image->resource;
      struct llvmpipe_resource *lp_res = llvmpipe_resource(res);
      struct lp_jit_image *jit_image;

      jit_image = &setup->fs.current.jit_context.images[i];
      if (!lp_res)
         continue;
      if (!lp_res->dt) {
         /* regular texture - setup array of mipmap level offsets */
         if (llvmpipe_resource_is_texture(res)) {
            jit_image->base = lp_res->tex_data;
         } else
            jit_image->base = lp_res->data;

         jit_image->width = res->width0;
         jit_image->height = res->height0;
         jit_image->depth = res->depth0;
         jit_image->num_samples = res->nr_samples;

         if (llvmpipe_resource_is_texture(res)) {
            uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];

            jit_image->width = u_minify(jit_image->width, image->u.tex.level);
            jit_image->height = u_minify(jit_image->height, image->u.tex.level);

            if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                res->target == PIPE_TEXTURE_2D_ARRAY ||
                res->target == PIPE_TEXTURE_3D ||
                res->target == PIPE_TEXTURE_CUBE ||
                res->target == PIPE_TEXTURE_CUBE_ARRAY) {
               /*
                * For array textures, we don't have first_layer, instead
                * adjust last_layer (stored as depth) plus the mip level offsets
                * (as we have mip-first layout can't just adjust base ptr).
                * XXX For mip levels, could do something similar.
                */
               jit_image->depth = image->u.tex.last_layer - image->u.tex.first_layer + 1;
               mip_offset += image->u.tex.first_layer * lp_res->img_stride[image->u.tex.level];
            } else
               jit_image->depth = u_minify(jit_image->depth, image->u.tex.level);

            jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
            jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
            jit_image->sample_stride = lp_res->sample_stride;
            jit_image->base = (uint8_t *)jit_image->base + mip_offset;
         }
         else {
            unsigned view_blocksize = util_format_get_blocksize(image->format);
            jit_image->width = image->u.buf.size / view_blocksize;
            jit_image->base = (uint8_t *)jit_image->base + image->u.buf.offset;
         }
      }
   }
   for (; i < ARRAY_SIZE(setup->images); i++) {
      util_copy_image_view(&setup->images[i].current, NULL);
   }
   setup->dirty |= LP_SETUP_NEW_FS;
}

void
lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
                              float alpha_ref_value )
{
   LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);

   if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
      setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
                                 const ubyte refs[2] )
{
   LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);

   if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
       setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
      setup->fs.current.jit_context.stencil_ref_front = refs[0];
      setup->fs.current.jit_context.stencil_ref_back = refs[1];
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_blend_color( struct lp_setup_context *setup,
                          const struct pipe_blend_color *blend_color )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(blend_color);

   if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
      memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
      setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
   }
}


void
lp_setup_set_scissors( struct lp_setup_context *setup,
                       const struct pipe_scissor_state *scissors )
{
   unsigned i;
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(scissors);

   for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
      setup->scissors[i].x0 = scissors[i].minx;
      setup->scissors[i].x1 = scissors[i].maxx-1;
      setup->scissors[i].y0 = scissors[i].miny;
      setup->scissors[i].y1 = scissors[i].maxy-1;
   }
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}

void
lp_setup_set_sample_mask(struct lp_setup_context *setup,
                         uint32_t sample_mask)
{
   if (setup->fs.current.jit_context.sample_mask != sample_mask) {
      setup->fs.current.jit_context.sample_mask = sample_mask;
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_flatshade_first(struct lp_setup_context *setup,
                             boolean flatshade_first)
{
   setup->flatshade_first = flatshade_first;
}

void
lp_setup_set_rasterizer_discard(struct lp_setup_context *setup,
                                boolean rasterizer_discard)
{
   if (setup->rasterizer_discard != rasterizer_discard) {
      setup->rasterizer_discard = rasterizer_discard;
      setup->line = first_line;
      setup->point = first_point;
      setup->triangle = first_triangle;
   }
}

void
lp_setup_set_vertex_info(struct lp_setup_context *setup,
                         struct vertex_info *vertex_info)
{
   /* XXX: just silently holding onto the pointer:
    */
   setup->vertex_info = vertex_info;
}

/**
 * Called during state validation when LP_NEW_VIEWPORT is set.
 */
void
lp_setup_set_viewports(struct lp_setup_context *setup,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports)
{
   struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num_viewports <= PIPE_MAX_VIEWPORTS);
   assert(viewports);

   /*
    * For use in lp_state_fs.c, propagate the viewport values for all viewports.
    */
   for (i = 0; i < num_viewports; i++) {
      float min_depth;
      float max_depth;
      util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz,
                              &min_depth, &max_depth);

      if (setup->viewports[i].min_depth != min_depth ||
          setup->viewports[i].max_depth != max_depth) {
         setup->viewports[i].min_depth = min_depth;
         setup->viewports[i].max_depth = max_depth;
         setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
      }
   }
}

/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   max_tex_num = MAX2(num, setup->fs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      /* We are going to overwrite/unref the current texture further below. If
       * set, make sure to unmap its resource to avoid leaking previous
       * mapping. */
      if (setup->fs.current_tex[i])
         llvmpipe_resource_unmap(setup->fs.current_tex[i], 0, 0);

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               jit_tex->base = lp_tex->data;
            }

            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
               jit_tex->num_samples = 0;
               jit_tex->sample_stride = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;
               jit_tex->num_samples = res->nr_samples;
               jit_tex->sample_stride = 0;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  jit_tex->sample_stride = lp_tex->sample_stride;

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have first_layer, instead
                      * adjust last_layer (stored as depth) plus the mip level offsets
                      * (as we have mip-first layout can't just adjust base ptr).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have "offset", instead adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            jit_tex->base = llvmpipe_resource_map(res, 0, 0, LP_TEX_USAGE_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            jit_tex->num_samples = res->nr_samples;
            jit_tex->sample_stride = 0;
            assert(jit_tex->base);
         }
      }
      else {
         pipe_resource_reference(&setup->fs.current_tex[i], NULL);
      }
   }
   setup->fs.current_tex_num = num;

   setup->dirty |= LP_SETUP_NEW_FS;
}

/**
 * Called during state validation when LP_NEW_SAMPLER is set.
 */
void
lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_state **samplers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;

      if (sampler) {
         struct lp_jit_sampler *jit_sam;
         jit_sam = &setup->fs.current.jit_context.samplers[i];

         jit_sam->min_lod = sampler->min_lod;
         jit_sam->max_lod = sampler->max_lod;
         jit_sam->lod_bias = sampler->lod_bias;
         COPY_4V(jit_sam->border_color, sampler->border_color.f);
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}

/**
 * Is the given texture referenced by any scene?
 * Note: we have to check all scenes including any scenes currently
 * being rendered and the current scene being built.
 */
unsigned
lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
                                 const struct pipe_resource *texture )
{
   unsigned i;

   /* check the render targets */
   for (i = 0; i < setup->fb.nr_cbufs; i++) {
      if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }
   if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
      return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   /* check textures referenced by the scene */
   for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
      if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
         return LP_REFERENCED_FOR_READ;
      }
   }

   for (i = 0; i < ARRAY_SIZE(setup->ssbos); i++) {
      if (setup->ssbos[i].current.buffer == texture)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   for (i = 0; i < ARRAY_SIZE(setup->images); i++) {
      if (setup->images[i].current.resource == texture)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   return LP_UNREFERENCED;
}

/**
 * Called by vbuf code when we're about to draw something.
 *
 * This function stores all dirty state in the current scene's display list
 * memory, via lp_scene_alloc(). We can not pass pointers of mutable state to
 * the JIT functions, as the JIT functions will be called later on, most likely
 * on a different thread.
 *
 * When processing dirty state it is imperative that we don't refer to any
 * pointers previously allocated with lp_scene_alloc() in this function (or any
 * function) as they may belong to a scene freed since then.
 */
static boolean
try_update_scene_state( struct lp_setup_context *setup )
{
   static const float fake_const_buf[4];
   boolean new_scene = (setup->fs.stored == NULL);
   struct lp_scene *scene = setup->scene;
   unsigned i;

   assert(scene);

   if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
      /*
       * Record new depth range state for changes due to viewport updates.
       *
       * TODO: Collapse the existing viewport and depth range information
       * into one structure, for access by JIT.
       */
      struct lp_jit_viewport *stored;

      stored = (struct lp_jit_viewport *)
         lp_scene_alloc(scene, sizeof setup->viewports);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      memcpy(stored, setup->viewports, sizeof setup->viewports);

      setup->fs.current.jit_context.viewports = stored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
      uint8_t *stored;
      float* fstored;
      unsigned i, j;
      unsigned size;

      /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
      size = 4 * 16 * sizeof(uint8_t);
      size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
      stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      /* Store floating point colour */
      fstored = (float*)(stored + 4*16);
      for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
         fstored[i] = setup->blend_color.current.color[i % 4];
      }

      /* smear each blend color component across 16 ubyte elements */
      for (i = 0; i < 4; ++i) {
         uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
         for (j = 0; j < 16; ++j)
            stored[i*16 + j] = c;
      }

      setup->blend_color.stored = stored;
      setup->fs.current.jit_context.u8_blend_color = stored;
      setup->fs.current.jit_context.f_blend_color = fstored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   struct llvmpipe_context *llvmpipe = llvmpipe_context(setup->pipe);
   if (llvmpipe->dirty & LP_NEW_FS_CONSTANTS)
      lp_setup_set_fs_constants(llvmpipe->setup,
                                ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_FRAGMENT]),
                                llvmpipe->constants[PIPE_SHADER_FRAGMENT]);

   if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
      for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
         struct pipe_resource *buffer = setup->constants[i].current.buffer;
         const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
                                            LP_MAX_TGSI_CONST_BUFFER_SIZE);
         const ubyte *current_data = NULL;
         int num_constants;

         STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);

         if (buffer) {
            /* resource buffer */
            current_data = (ubyte *) llvmpipe_resource_data(buffer);
         }
         else if (setup->constants[i].current.user_buffer) {
            /* user-space buffer */
            current_data = (ubyte *) setup->constants[i].current.user_buffer;
         }

         if (current_data && current_size >= sizeof(float)) {
            current_data += setup->constants[i].current.buffer_offset;

            /* TODO: copy only the actually used constants? */

            if (setup->constants[i].stored_size != current_size ||
                !setup->constants[i].stored_data ||
                memcmp(setup->constants[i].stored_data,
                       current_data,
                       current_size) != 0) {
               void *stored;

               stored = lp_scene_alloc(scene, current_size);
               if (!stored) {
                  assert(!new_scene);
                  return FALSE;
               }

               memcpy(stored,
                      current_data,
                      current_size);
               setup->constants[i].stored_size = current_size;
               setup->constants[i].stored_data = stored;
            }
            setup->fs.current.jit_context.constants[i] =
               setup->constants[i].stored_data;
         }
         else {
            setup->constants[i].stored_size = 0;
            setup->constants[i].stored_data = NULL;
            setup->fs.current.jit_context.constants[i] = fake_const_buf;
         }

         num_constants =
            DIV_ROUND_UP(setup->constants[i].stored_size, lp_get_constant_buffer_stride(scene->pipe->screen));
         setup->fs.current.jit_context.num_constants[i] = num_constants;
         setup->dirty |= LP_SETUP_NEW_FS;
      }
   }

   if (setup->dirty & LP_SETUP_NEW_SSBOS) {
      for (i = 0; i < ARRAY_SIZE(setup->ssbos); ++i) {
         struct pipe_resource *buffer = setup->ssbos[i].current.buffer;
         const ubyte *current_data = NULL;

         if (!buffer)
            continue;
         /* resource buffer */
         current_data = (ubyte *) llvmpipe_resource_data(buffer);
         if (current_data) {
            current_data += setup->ssbos[i].current.buffer_offset;

            setup->fs.current.jit_context.ssbos[i] = (const uint32_t *)current_data;
            setup->fs.current.jit_context.num_ssbos[i] = setup->ssbos[i].current.buffer_size;
         } else {
            setup->fs.current.jit_context.ssbos[i] = NULL;
            setup->fs.current.jit_context.num_ssbos[i] = 0;
         }
         setup->dirty |= LP_SETUP_NEW_FS;
      }
   }
   if (setup->dirty & LP_SETUP_NEW_FS) {
      if (!setup->fs.stored ||
          memcmp(setup->fs.stored,
                 &setup->fs.current,
                 sizeof setup->fs.current) != 0)
      {
         struct lp_rast_state *stored;

         /* The fs state that's been stored in the scene is different from
          * the new, current state. So allocate a new lp_rast_state object
          * and append it to the bin's setup data buffer.
          */
         stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
         if (!stored) {
            assert(!new_scene);
            return FALSE;
         }

         memcpy(&stored->jit_context,
                &setup->fs.current.jit_context,
                sizeof setup->fs.current.jit_context);
         stored->variant = setup->fs.current.variant;

         if (!lp_scene_add_frag_shader_reference(scene,
                                                 setup->fs.current.variant))
            return FALSE;
         setup->fs.stored = stored;

         /* The scene now references the textures in the rasterization
          * state record. Note that now.
          */
         for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
            if (setup->fs.current_tex[i]) {
               if (!lp_scene_add_resource_reference(scene,
                                                    setup->fs.current_tex[i],
                                                    new_scene)) {
                  assert(!new_scene);
                  return FALSE;
               }
            }
         }
      }
   }

   if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
      unsigned i;
      for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
         setup->draw_regions[i] = setup->framebuffer;
         if (setup->scissor_test) {
            u_rect_possible_intersection(&setup->scissors[i],
                                         &setup->draw_regions[i]);
         }
      }
   }

   setup->dirty = 0;

   assert(setup->fs.stored);
   return TRUE;
}

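/**
 * Validate llvmpipe derived state and, when update_scene is set, make sure
 * an active scene exists with all dirty setup state stored into it,
 * flushing and restarting the scene if storing the state fails.
 */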
boolean
lp_setup_update_state( struct lp_setup_context *setup,
                       boolean update_scene )
{
   /* Some of the 'draw' pipeline stages may have changed some driver state.
    * Make sure we've processed those state changes before anything else.
    *
    * XXX this is the only place where llvmpipe_context is used in the
    * setup code. This may get refactored/changed...
    */
   {
      struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
      if (lp->dirty) {
         llvmpipe_update_derived(lp);
      }

      if (lp->setup->dirty) {
         llvmpipe_update_setup(lp);
      }

      assert(setup->setup.variant);

      /* Will probably need to move this somewhere else, just need
       * to know about vertex shader point size attribute.
       */
      setup->psize_slot = lp->psize_slot;
      setup->viewport_index_slot = lp->viewport_index_slot;
      setup->layer_slot = lp->layer_slot;
      setup->face_slot = lp->face_slot;

      assert(lp->dirty == 0);

      assert(lp->setup_variant.key.size ==
             setup->setup.variant->key.size);

      assert(memcmp(&lp->setup_variant.key,
                    &setup->setup.variant->key,
                    setup->setup.variant->key.size) == 0);
   }

   if (update_scene && setup->state != SETUP_ACTIVE) {
      if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
         return FALSE;
   }

   /* Only call into update_scene_state() if we already have a
    * scene:
    */
   if (update_scene && setup->scene) {
      assert(setup->state == SETUP_ACTIVE);

      if (try_update_scene_state(setup))
         return TRUE;

      /* Update failed, try to restart the scene.
       *
       * Cannot call lp_setup_flush_and_restart() directly here
       * because of potential recursion.
       */
      if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
         return FALSE;

      if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
         return FALSE;

      if (!setup->scene)
         return FALSE;

      return try_update_scene_state(setup);
   }

   return TRUE;
}

/* Only caller is lp_setup_vbuf_destroy()
 */
void
lp_setup_destroy( struct lp_setup_context *setup )
{
   uint i;

   lp_setup_reset( setup );

   util_unreference_framebuffer_state(&setup->fb);

   for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
      struct pipe_resource **res_ptr = &setup->fs.current_tex[i];
      if (*res_ptr)
         llvmpipe_resource_unmap(*res_ptr, 0, 0);
      pipe_resource_reference(res_ptr, NULL);
   }

   for (i = 0; i < ARRAY_SIZE(setup->constants); i++) {
      pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
   }

   for (i = 0; i < ARRAY_SIZE(setup->ssbos); i++) {
      pipe_resource_reference(&setup->ssbos[i].current.buffer, NULL);
   }

   /* free the scenes in the 'empty' queue */
   for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
      struct lp_scene *scene = setup->scenes[i];

      if (scene->fence)
         lp_fence_wait(scene->fence);

      lp_scene_destroy(scene);
   }

   lp_fence_reference(&setup->last_fence, NULL);

   FREE( setup );
}

/**
 * Create a new primitive tiling engine. Plug it into the backend of
 * the draw module. Currently also creates a rasterizer to use with
 * it.
 */
struct lp_setup_context *
lp_setup_create( struct pipe_context *pipe,
                 struct draw_context *draw )
{
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_setup_context *setup;
   unsigned i;

   setup = CALLOC_STRUCT(lp_setup_context);
   if (!setup) {
      goto no_setup;
   }

   lp_setup_init_vbuf(setup);

   /* Used only in update_state():
    */
   setup->pipe = pipe;


   setup->num_threads = screen->num_threads;
   setup->vbuf = draw_vbuf_stage(draw, &setup->base);
   if (!setup->vbuf) {
      goto no_vbuf;
   }

   draw_set_rasterize_stage(draw, setup->vbuf);
   draw_set_render(draw, &setup->base);

   /* create some empty scenes */
   for (i = 0; i < MAX_SCENES; i++) {
      setup->scenes[i] = lp_scene_create( pipe );
      if (!setup->scenes[i]) {
         goto no_scenes;
      }
   }

   setup->triangle = first_triangle;
   setup->line = first_line;
   setup->point = first_point;

   setup->dirty = ~0;

   /* Initialize empty default fb correctly, so the rect is empty */
   setup->framebuffer.x1 = -1;
   setup->framebuffer.y1 = -1;

   return setup;

no_scenes:
   for (i = 0; i < MAX_SCENES; i++) {
      if (setup->scenes[i]) {
         lp_scene_destroy(setup->scenes[i]);
      }
   }

   setup->vbuf->destroy(setup->vbuf);
no_vbuf:
   FREE(setup);
no_setup:
   return NULL;
}

/**
 * Put a BeginQuery command into all bins.
 */
void
lp_setup_begin_query(struct lp_setup_context *setup,
                     struct llvmpipe_query *pq)
{
   set_scene_state(setup, SETUP_ACTIVE, "begin_query");

   if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
         pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
         pq->type == PIPE_QUERY_TIME_ELAPSED))
      return;

   /* init the query to its beginning state */
   assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
   /* exceeding list size so just ignore the query */
   if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
      return;
   }
   assert(setup->active_queries[setup->active_binned_queries] == NULL);
   setup->active_queries[setup->active_binned_queries] = pq;
   setup->active_binned_queries++;

   assert(setup->scene);
   if (setup->scene) {
      if (!lp_scene_bin_everywhere(setup->scene,
                                   LP_RAST_OP_BEGIN_QUERY,
                                   lp_rast_arg_query(pq))) {

         if (!lp_setup_flush_and_restart(setup))
            return;

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_BEGIN_QUERY,
                                      lp_rast_arg_query(pq))) {
            return;
         }
      }
      setup->scene->had_queries |= TRUE;
   }
}

/**
 * Put an EndQuery command into all bins.
 */
void
lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
{
   set_scene_state(setup, SETUP_ACTIVE, "end_query");

   assert(setup->scene);
   if (setup->scene) {
      /* pq->fence should be the fence of the *last* scene which
       * contributed to the query result.
       */
      lp_fence_reference(&pq->fence, setup->scene->fence);

      if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
          pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
          pq->type == PIPE_QUERY_TIMESTAMP ||
          pq->type == PIPE_QUERY_TIME_ELAPSED) {
         if (pq->type == PIPE_QUERY_TIMESTAMP &&
             !(setup->scene->tiles_x | setup->scene->tiles_y)) {
            /*
             * If there's a zero width/height framebuffer, there's no bins and
             * hence no rast task is ever run. So fill in something here instead.
             */
            pq->end[0] = os_time_get_nano();
         }

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_END_QUERY,
                                      lp_rast_arg_query(pq))) {
            if (!lp_setup_flush_and_restart(setup))
               goto fail;

            if (!lp_scene_bin_everywhere(setup->scene,
                                         LP_RAST_OP_END_QUERY,
                                         lp_rast_arg_query(pq))) {
               goto fail;
            }
         }
         setup->scene->had_queries |= TRUE;
      }
   }
   else {
      lp_fence_reference(&pq->fence, setup->last_fence);
   }

fail:
   /* Need to do this now not earlier since it still needs to be marked as
    * active when binning it would cause a flush.
    */
   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
       pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
       pq->type == PIPE_QUERY_TIME_ELAPSED) {
      unsigned i;

      /* remove from active binned query list */
      for (i = 0; i < setup->active_binned_queries; i++) {
         if (setup->active_queries[i] == pq)
            break;
      }
      assert(i < setup->active_binned_queries);
      if (i == setup->active_binned_queries)
         return;
      setup->active_binned_queries--;
      setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
      setup->active_queries[setup->active_binned_queries] = NULL;
   }
}

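/**
 * Flush the current scene and immediately start a new one, revalidating
 * state so binning can continue. Used when binning a command fails,
 * typically because the scene ran out of memory.
 */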
boolean
lp_setup_flush_and_restart(struct lp_setup_context *setup)
{
   if (0) debug_printf("%s\n", __FUNCTION__);

   assert(setup->state == SETUP_ACTIVE);

   if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
      return FALSE;

   if (!lp_setup_update_state(setup, TRUE))
      return FALSE;

   return TRUE;
}