Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/auxiliary/cso_cache/cso_context.c
4565 views
1
/**************************************************************************
2
*
3
* Copyright 2007 VMware, Inc.
4
* All Rights Reserved.
5
*
6
* Permission is hereby granted, free of charge, to any person obtaining a
7
* copy of this software and associated documentation files (the
8
* "Software"), to deal in the Software without restriction, including
9
* without limitation the rights to use, copy, modify, merge, publish,
10
* distribute, sub license, and/or sell copies of the Software, and to
11
* permit persons to whom the Software is furnished to do so, subject to
12
* the following conditions:
13
*
14
* The above copyright notice and this permission notice (including the
15
* next paragraph) shall be included in all copies or substantial portions
16
* of the Software.
17
*
18
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
*
26
**************************************************************************/
27
28
/**
29
* @file
30
*
31
* Wrap the cso cache & hash mechanisms in a simplified
32
* pipe-driver-specific interface.
33
*
34
* @author Zack Rusin <[email protected]>
35
* @author Keith Whitwell <[email protected]>
36
*/
37
38
#include "pipe/p_state.h"
39
#include "util/u_draw.h"
40
#include "util/u_framebuffer.h"
41
#include "util/u_inlines.h"
42
#include "util/u_math.h"
43
#include "util/u_memory.h"
44
#include "util/u_vbuf.h"
45
#include "tgsi/tgsi_parse.h"
46
47
#include "cso_cache/cso_context.h"
48
#include "cso_cache/cso_cache.h"
49
#include "cso_cache/cso_hash.h"
50
#include "cso_context.h"
51
52
53
/**
 * Per-shader sampler information.
 *
 * Tracks, for one shader stage, both the cached sampler CSOs and the
 * opaque driver sampler-state handles that are currently in use.
 */
struct sampler_info
{
   /* Cache entries for each sampler slot (NULL if the slot is unused). */
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
   /* Opaque driver sampler-state handles for each slot. */
   void *samplers[PIPE_MAX_SAMPLERS];
};
61
62
63
64
struct cso_context {
   struct pipe_context *pipe;

   /* Optional vertex-buffer translation layer (see cso_init_vbuf).
    * vbuf_current is either 'vbuf' (u_vbuf active) or NULL (bypassed). */
   struct u_vbuf *vbuf;
   struct u_vbuf *vbuf_current;
   bool always_use_vbuf;

   /* Capabilities queried from the screen once at creation time. */
   boolean has_geometry_shader;
   boolean has_tessellation;
   boolean has_compute_shader;
   boolean has_streamout;

   unsigned saved_state;  /**< bitmask of CSO_BIT_x flags */
   unsigned saved_compute_state;  /**< bitmask of CSO_BIT_COMPUTE_x flags */

   struct sampler_info fragment_samplers_saved;
   struct sampler_info compute_samplers_saved;
   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   /* NOTE: cso_unbind_context() clears everything from nr_so_targets up to
    * (but not including) 'cache' with a single memset/offsetof pair, so be
    * careful when reordering the fields below. */
   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader, *compute_shader_saved;
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;
   bool flatshade_first, flatshade_first_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;

   /* This should be last to keep all of the above together in memory. */
   struct cso_cache cache;
};
121
122
/** Return the pipe_context this CSO context wraps. */
struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
{
   return cso->pipe;
}
126
127
static inline boolean delete_cso(struct cso_context *ctx,
128
void *state, enum cso_cache_type type)
129
{
130
switch (type) {
131
case CSO_BLEND:
132
if (ctx->blend == ((struct cso_blend*)state)->data)
133
return false;
134
break;
135
case CSO_DEPTH_STENCIL_ALPHA:
136
if (ctx->depth_stencil == ((struct cso_depth_stencil_alpha*)state)->data)
137
return false;
138
break;
139
case CSO_RASTERIZER:
140
if (ctx->rasterizer == ((struct cso_rasterizer*)state)->data)
141
return false;
142
break;
143
case CSO_VELEMENTS:
144
if (ctx->velements == ((struct cso_velements*)state)->data)
145
return false;
146
break;
147
case CSO_SAMPLER:
148
/* nothing to do for samplers */
149
break;
150
default:
151
assert(0);
152
}
153
154
cso_delete_state(ctx->pipe, state, type);
155
return true;
156
}
157
158
static inline void
159
sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
160
int max_size, void *user_data)
161
{
162
struct cso_context *ctx = (struct cso_context *)user_data;
163
/* if we're approach the maximum size, remove fourth of the entries
164
* otherwise every subsequent call will go through the same */
165
int hash_size = cso_hash_size(hash);
166
int max_entries = (max_size > hash_size) ? max_size : hash_size;
167
int to_remove = (max_size < max_entries) * max_entries/4;
168
struct cso_hash_iter iter;
169
struct cso_sampler **samplers_to_restore = NULL;
170
unsigned to_restore = 0;
171
172
if (hash_size > max_size)
173
to_remove += hash_size - max_size;
174
175
if (to_remove == 0)
176
return;
177
178
if (type == CSO_SAMPLER) {
179
int i, j;
180
181
samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
182
sizeof(*samplers_to_restore));
183
184
/* Temporarily remove currently bound sampler states from the hash
185
* table, to prevent them from being deleted
186
*/
187
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
188
for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
189
struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
190
191
if (sampler && cso_hash_take(hash, sampler->hash_key))
192
samplers_to_restore[to_restore++] = sampler;
193
}
194
}
195
}
196
197
iter = cso_hash_first_node(hash);
198
while (to_remove) {
199
/*remove elements until we're good */
200
/*fixme: currently we pick the nodes to remove at random*/
201
void *cso = cso_hash_iter_data(iter);
202
203
if (!cso)
204
break;
205
206
if (delete_cso(ctx, cso, type)) {
207
iter = cso_hash_erase(hash, iter);
208
--to_remove;
209
} else
210
iter = cso_hash_iter_next(iter);
211
}
212
213
if (type == CSO_SAMPLER) {
214
/* Put currently bound sampler states back into the hash table */
215
while (to_restore--) {
216
struct cso_sampler *sampler = samplers_to_restore[to_restore];
217
218
cso_hash_insert(hash, sampler->hash_key, sampler);
219
}
220
221
FREE(samplers_to_restore);
222
}
223
}
224
225
static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
226
{
227
struct u_vbuf_caps caps;
228
bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
229
bool needs64b = !(flags & CSO_NO_64B_VERTEX_BUFFERS);
230
231
u_vbuf_get_caps(cso->pipe->screen, &caps, needs64b);
232
233
/* Enable u_vbuf if needed. */
234
if (caps.fallback_always ||
235
(uses_user_vertex_buffers &&
236
caps.fallback_only_for_user_vbuffers)) {
237
cso->vbuf = u_vbuf_create(cso->pipe, &caps);
238
cso->vbuf_current = cso->vbuf;
239
cso->always_use_vbuf = caps.fallback_always;
240
}
241
}
242
243
struct cso_context *
244
cso_create_context(struct pipe_context *pipe, unsigned flags)
245
{
246
struct cso_context *ctx = CALLOC_STRUCT(cso_context);
247
if (!ctx)
248
return NULL;
249
250
cso_cache_init(&ctx->cache, pipe);
251
cso_cache_set_sanitize_callback(&ctx->cache, sanitize_hash, ctx);
252
253
ctx->pipe = pipe;
254
ctx->sample_mask = ~0;
255
256
if (!(flags & CSO_NO_VBUF))
257
cso_init_vbuf(ctx, flags);
258
259
/* Enable for testing: */
260
if (0) cso_set_maximum_cache_size(&ctx->cache, 4);
261
262
if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
263
PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
264
ctx->has_geometry_shader = TRUE;
265
}
266
if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
267
PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
268
ctx->has_tessellation = TRUE;
269
}
270
if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
271
PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
272
int supported_irs =
273
pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
274
PIPE_SHADER_CAP_SUPPORTED_IRS);
275
if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
276
(1 << PIPE_SHADER_IR_NIR))) {
277
ctx->has_compute_shader = TRUE;
278
}
279
}
280
if (pipe->screen->get_param(pipe->screen,
281
PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
282
ctx->has_streamout = TRUE;
283
}
284
285
ctx->max_sampler_seen = -1;
286
return ctx;
287
}
288
289
/**
 * Unbind all state from the pipe context and reset this CSO context's
 * shadow copies, so the cso_context can be destroyed or reused with the
 * same pipe context without stale bindings.  The unbind order below is
 * deliberate; keep it when editing.
 */
void cso_unbind_context(struct cso_context *ctx)
{
   unsigned i;

   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* Static zero-filled tables shared by all contexts; only ever
          * passed as "unbind everything" arguments, never written. */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static struct pipe_shader_buffer ssbos[PIPE_MAX_SHADER_BUFFERS] = { 0 };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         enum pipe_shader_type sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            /* Skip stages the driver doesn't support. */
            switch (sh) {
            case PIPE_SHADER_GEOMETRY:
               if (!ctx->has_geometry_shader)
                  continue;
               break;
            case PIPE_SHADER_TESS_CTRL:
            case PIPE_SHADER_TESS_EVAL:
               if (!ctx->has_tessellation)
                  continue;
               break;
            case PIPE_SHADER_COMPUTE:
               if (!ctx->has_compute_shader)
                  continue;
               break;
            default:
               break;
            }

            /* Query per-stage limits so we only unbind as many slots as
             * the driver exposes. */
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            int maxssbo = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SHADER_BUFFERS);
            int maxcb = scr->get_shader_param(scr, sh,
                                              PIPE_SHADER_CAP_MAX_CONST_BUFFERS);
            int maximg = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            assert(maxssbo <= PIPE_MAX_SHADER_BUFFERS);
            assert(maxcb <= PIPE_MAX_CONSTANT_BUFFERS);
            assert(maximg <= PIPE_MAX_SHADER_IMAGES);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, 0, views);
            }
            if (maxssbo > 0) {
               ctx->pipe->set_shader_buffers(ctx->pipe, sh, 0, maxssbo, ssbos, 0);
            }
            if (maximg > 0) {
               ctx->pipe->set_shader_images(ctx->pipe, sh, 0, 0, maximg, NULL);
            }
            for (int i = 0; i < maxcb; i++) {
               ctx->pipe->set_constant_buffer(ctx->pipe, sh, i, false, NULL);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      struct pipe_stencil_ref sr = {0};
      ctx->pipe->set_stencil_ref(ctx->pipe, sr);
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->pipe->bind_compute_state(ctx->pipe, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* Drop references held by the shadow copies. */
   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   /* Wipe the shadow state in bulk: everything from nr_so_targets up to
    * (but not including) 'cache' — relies on the field layout declared in
    * struct cso_context. */
   memset(&ctx->samplers, 0, sizeof(ctx->samplers));
   memset(&ctx->nr_so_targets, 0, offsetof(struct cso_context, cache) - offsetof(struct cso_context, nr_so_targets));
   ctx->sample_mask = ~0;
   /*
    * If the cso context is reused (with the same pipe context),
    * need to really make sure the context state doesn't get out of sync.
    */
   ctx->pipe->set_sample_mask(ctx->pipe, ctx->sample_mask);
   if (ctx->pipe->set_min_samples)
      ctx->pipe->set_min_samples(ctx->pipe, ctx->min_samples);
}
397
398
/**
 * Free the CSO context.
 * Unbinds everything from the wrapped pipe context first so the driver
 * holds no dangling references to cached state objects, then destroys the
 * cache (and the u_vbuf layer, if one was created).
 */
void cso_destroy_context( struct cso_context *ctx )
{
   cso_unbind_context(ctx);
   cso_cache_delete(&ctx->cache);

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}
410
411
412
/* These functions either find the state matching the given template in
 * the cache, or create a new state from the template, insert it in the
 * cache, and return it.
 */

/*
 * If the driver returns 0 from the create method, the data member of the
 * cso is assigned to be the template itself.
 */
422
enum pipe_error cso_set_blend(struct cso_context *ctx,
423
const struct pipe_blend_state *templ)
424
{
425
unsigned key_size, hash_key;
426
struct cso_hash_iter iter;
427
void *handle;
428
429
key_size = templ->independent_blend_enable ?
430
sizeof(struct pipe_blend_state) :
431
(char *)&(templ->rt[1]) - (char *)templ;
432
hash_key = cso_construct_key((void*)templ, key_size);
433
iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
434
(void*)templ, key_size);
435
436
if (cso_hash_iter_is_null(iter)) {
437
struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
438
if (!cso)
439
return PIPE_ERROR_OUT_OF_MEMORY;
440
441
memset(&cso->state, 0, sizeof cso->state);
442
memcpy(&cso->state, templ, key_size);
443
cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
444
445
iter = cso_insert_state(&ctx->cache, hash_key, CSO_BLEND, cso);
446
if (cso_hash_iter_is_null(iter)) {
447
FREE(cso);
448
return PIPE_ERROR_OUT_OF_MEMORY;
449
}
450
451
handle = cso->data;
452
}
453
else {
454
handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
455
}
456
457
if (ctx->blend != handle) {
458
ctx->blend = handle;
459
ctx->pipe->bind_blend_state(ctx->pipe, handle);
460
}
461
return PIPE_OK;
462
}
463
464
static void
465
cso_save_blend(struct cso_context *ctx)
466
{
467
assert(!ctx->blend_saved);
468
ctx->blend_saved = ctx->blend;
469
}
470
471
static void
472
cso_restore_blend(struct cso_context *ctx)
473
{
474
if (ctx->blend != ctx->blend_saved) {
475
ctx->blend = ctx->blend_saved;
476
ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
477
}
478
ctx->blend_saved = NULL;
479
}
480
481
482
483
enum pipe_error
484
cso_set_depth_stencil_alpha(struct cso_context *ctx,
485
const struct pipe_depth_stencil_alpha_state *templ)
486
{
487
unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
488
unsigned hash_key = cso_construct_key((void*)templ, key_size);
489
struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
490
hash_key,
491
CSO_DEPTH_STENCIL_ALPHA,
492
(void*)templ, key_size);
493
void *handle;
494
495
if (cso_hash_iter_is_null(iter)) {
496
struct cso_depth_stencil_alpha *cso =
497
MALLOC(sizeof(struct cso_depth_stencil_alpha));
498
if (!cso)
499
return PIPE_ERROR_OUT_OF_MEMORY;
500
501
memcpy(&cso->state, templ, sizeof(*templ));
502
cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
503
&cso->state);
504
505
iter = cso_insert_state(&ctx->cache, hash_key,
506
CSO_DEPTH_STENCIL_ALPHA, cso);
507
if (cso_hash_iter_is_null(iter)) {
508
FREE(cso);
509
return PIPE_ERROR_OUT_OF_MEMORY;
510
}
511
512
handle = cso->data;
513
}
514
else {
515
handle = ((struct cso_depth_stencil_alpha *)
516
cso_hash_iter_data(iter))->data;
517
}
518
519
if (ctx->depth_stencil != handle) {
520
ctx->depth_stencil = handle;
521
ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
522
}
523
return PIPE_OK;
524
}
525
526
static void
527
cso_save_depth_stencil_alpha(struct cso_context *ctx)
528
{
529
assert(!ctx->depth_stencil_saved);
530
ctx->depth_stencil_saved = ctx->depth_stencil;
531
}
532
533
static void
534
cso_restore_depth_stencil_alpha(struct cso_context *ctx)
535
{
536
if (ctx->depth_stencil != ctx->depth_stencil_saved) {
537
ctx->depth_stencil = ctx->depth_stencil_saved;
538
ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
539
ctx->depth_stencil_saved);
540
}
541
ctx->depth_stencil_saved = NULL;
542
}
543
544
545
546
enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
547
const struct pipe_rasterizer_state *templ)
548
{
549
unsigned key_size = sizeof(struct pipe_rasterizer_state);
550
unsigned hash_key = cso_construct_key((void*)templ, key_size);
551
struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
552
hash_key,
553
CSO_RASTERIZER,
554
(void*)templ, key_size);
555
void *handle = NULL;
556
557
/* We can't have both point_quad_rasterization (sprites) and point_smooth
558
* (round AA points) enabled at the same time.
559
*/
560
assert(!(templ->point_quad_rasterization && templ->point_smooth));
561
562
if (cso_hash_iter_is_null(iter)) {
563
struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
564
if (!cso)
565
return PIPE_ERROR_OUT_OF_MEMORY;
566
567
memcpy(&cso->state, templ, sizeof(*templ));
568
cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
569
570
iter = cso_insert_state(&ctx->cache, hash_key, CSO_RASTERIZER, cso);
571
if (cso_hash_iter_is_null(iter)) {
572
FREE(cso);
573
return PIPE_ERROR_OUT_OF_MEMORY;
574
}
575
576
handle = cso->data;
577
}
578
else {
579
handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
580
}
581
582
if (ctx->rasterizer != handle) {
583
ctx->rasterizer = handle;
584
ctx->flatshade_first = templ->flatshade_first;
585
if (ctx->vbuf)
586
u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
587
ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
588
}
589
return PIPE_OK;
590
}
591
592
static void
593
cso_save_rasterizer(struct cso_context *ctx)
594
{
595
assert(!ctx->rasterizer_saved);
596
ctx->rasterizer_saved = ctx->rasterizer;
597
ctx->flatshade_first_saved = ctx->flatshade_first;
598
}
599
600
static void
601
cso_restore_rasterizer(struct cso_context *ctx)
602
{
603
if (ctx->rasterizer != ctx->rasterizer_saved) {
604
ctx->rasterizer = ctx->rasterizer_saved;
605
ctx->flatshade_first = ctx->flatshade_first_saved;
606
if (ctx->vbuf)
607
u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
608
ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
609
}
610
ctx->rasterizer_saved = NULL;
611
}
612
613
614
void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
615
{
616
if (ctx->fragment_shader != handle) {
617
ctx->fragment_shader = handle;
618
ctx->pipe->bind_fs_state(ctx->pipe, handle);
619
}
620
}
621
622
static void
623
cso_save_fragment_shader(struct cso_context *ctx)
624
{
625
assert(!ctx->fragment_shader_saved);
626
ctx->fragment_shader_saved = ctx->fragment_shader;
627
}
628
629
static void
630
cso_restore_fragment_shader(struct cso_context *ctx)
631
{
632
if (ctx->fragment_shader_saved != ctx->fragment_shader) {
633
ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
634
ctx->fragment_shader = ctx->fragment_shader_saved;
635
}
636
ctx->fragment_shader_saved = NULL;
637
}
638
639
640
void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
641
{
642
if (ctx->vertex_shader != handle) {
643
ctx->vertex_shader = handle;
644
ctx->pipe->bind_vs_state(ctx->pipe, handle);
645
}
646
}
647
648
static void
649
cso_save_vertex_shader(struct cso_context *ctx)
650
{
651
assert(!ctx->vertex_shader_saved);
652
ctx->vertex_shader_saved = ctx->vertex_shader;
653
}
654
655
static void
656
cso_restore_vertex_shader(struct cso_context *ctx)
657
{
658
if (ctx->vertex_shader_saved != ctx->vertex_shader) {
659
ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
660
ctx->vertex_shader = ctx->vertex_shader_saved;
661
}
662
ctx->vertex_shader_saved = NULL;
663
}
664
665
666
void cso_set_framebuffer(struct cso_context *ctx,
667
const struct pipe_framebuffer_state *fb)
668
{
669
if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
670
util_copy_framebuffer_state(&ctx->fb, fb);
671
ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
672
}
673
}
674
675
static void
676
cso_save_framebuffer(struct cso_context *ctx)
677
{
678
util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
679
}
680
681
static void
682
cso_restore_framebuffer(struct cso_context *ctx)
683
{
684
if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
685
util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
686
ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
687
util_unreference_framebuffer_state(&ctx->fb_saved);
688
}
689
}
690
691
692
void cso_set_viewport(struct cso_context *ctx,
693
const struct pipe_viewport_state *vp)
694
{
695
if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
696
ctx->vp = *vp;
697
ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
698
}
699
}
700
701
/**
702
* Setup viewport state for given width and height (position is always (0,0)).
703
* Invert the Y axis if 'invert' is true.
704
*/
705
void
706
cso_set_viewport_dims(struct cso_context *ctx,
707
float width, float height, boolean invert)
708
{
709
struct pipe_viewport_state vp;
710
vp.scale[0] = width * 0.5f;
711
vp.scale[1] = height * (invert ? -0.5f : 0.5f);
712
vp.scale[2] = 0.5f;
713
vp.translate[0] = 0.5f * width;
714
vp.translate[1] = 0.5f * height;
715
vp.translate[2] = 0.5f;
716
vp.swizzle_x = PIPE_VIEWPORT_SWIZZLE_POSITIVE_X;
717
vp.swizzle_y = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y;
718
vp.swizzle_z = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z;
719
vp.swizzle_w = PIPE_VIEWPORT_SWIZZLE_POSITIVE_W;
720
cso_set_viewport(ctx, &vp);
721
}
722
723
static void
724
cso_save_viewport(struct cso_context *ctx)
725
{
726
ctx->vp_saved = ctx->vp;
727
}
728
729
730
static void
731
cso_restore_viewport(struct cso_context *ctx)
732
{
733
if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
734
ctx->vp = ctx->vp_saved;
735
ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
736
}
737
}
738
739
void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
740
{
741
if (ctx->sample_mask != sample_mask) {
742
ctx->sample_mask = sample_mask;
743
ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
744
}
745
}
746
747
static void
748
cso_save_sample_mask(struct cso_context *ctx)
749
{
750
ctx->sample_mask_saved = ctx->sample_mask;
751
}
752
753
static void
754
cso_restore_sample_mask(struct cso_context *ctx)
755
{
756
cso_set_sample_mask(ctx, ctx->sample_mask_saved);
757
}
758
759
void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
760
{
761
if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
762
ctx->min_samples = min_samples;
763
ctx->pipe->set_min_samples(ctx->pipe, min_samples);
764
}
765
}
766
767
static void
768
cso_save_min_samples(struct cso_context *ctx)
769
{
770
ctx->min_samples_saved = ctx->min_samples;
771
}
772
773
static void
774
cso_restore_min_samples(struct cso_context *ctx)
775
{
776
cso_set_min_samples(ctx, ctx->min_samples_saved);
777
}
778
779
void cso_set_stencil_ref(struct cso_context *ctx,
780
const struct pipe_stencil_ref sr)
781
{
782
if (memcmp(&ctx->stencil_ref, &sr, sizeof(ctx->stencil_ref))) {
783
ctx->stencil_ref = sr;
784
ctx->pipe->set_stencil_ref(ctx->pipe, sr);
785
}
786
}
787
788
static void
789
cso_save_stencil_ref(struct cso_context *ctx)
790
{
791
ctx->stencil_ref_saved = ctx->stencil_ref;
792
}
793
794
795
static void
796
cso_restore_stencil_ref(struct cso_context *ctx)
797
{
798
if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
799
sizeof(ctx->stencil_ref))) {
800
ctx->stencil_ref = ctx->stencil_ref_saved;
801
ctx->pipe->set_stencil_ref(ctx->pipe, ctx->stencil_ref);
802
}
803
}
804
805
void cso_set_render_condition(struct cso_context *ctx,
806
struct pipe_query *query,
807
boolean condition,
808
enum pipe_render_cond_flag mode)
809
{
810
struct pipe_context *pipe = ctx->pipe;
811
812
if (ctx->render_condition != query ||
813
ctx->render_condition_mode != mode ||
814
ctx->render_condition_cond != condition) {
815
pipe->render_condition(pipe, query, condition, mode);
816
ctx->render_condition = query;
817
ctx->render_condition_cond = condition;
818
ctx->render_condition_mode = mode;
819
}
820
}
821
822
static void
823
cso_save_render_condition(struct cso_context *ctx)
824
{
825
ctx->render_condition_saved = ctx->render_condition;
826
ctx->render_condition_cond_saved = ctx->render_condition_cond;
827
ctx->render_condition_mode_saved = ctx->render_condition_mode;
828
}
829
830
static void
831
cso_restore_render_condition(struct cso_context *ctx)
832
{
833
cso_set_render_condition(ctx, ctx->render_condition_saved,
834
ctx->render_condition_cond_saved,
835
ctx->render_condition_mode_saved);
836
}
837
838
void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
839
{
840
assert(ctx->has_geometry_shader || !handle);
841
842
if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
843
ctx->geometry_shader = handle;
844
ctx->pipe->bind_gs_state(ctx->pipe, handle);
845
}
846
}
847
848
static void
849
cso_save_geometry_shader(struct cso_context *ctx)
850
{
851
if (!ctx->has_geometry_shader) {
852
return;
853
}
854
855
assert(!ctx->geometry_shader_saved);
856
ctx->geometry_shader_saved = ctx->geometry_shader;
857
}
858
859
static void
860
cso_restore_geometry_shader(struct cso_context *ctx)
861
{
862
if (!ctx->has_geometry_shader) {
863
return;
864
}
865
866
if (ctx->geometry_shader_saved != ctx->geometry_shader) {
867
ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
868
ctx->geometry_shader = ctx->geometry_shader_saved;
869
}
870
ctx->geometry_shader_saved = NULL;
871
}
872
873
void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
874
{
875
assert(ctx->has_tessellation || !handle);
876
877
if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
878
ctx->tessctrl_shader = handle;
879
ctx->pipe->bind_tcs_state(ctx->pipe, handle);
880
}
881
}
882
883
static void
884
cso_save_tessctrl_shader(struct cso_context *ctx)
885
{
886
if (!ctx->has_tessellation) {
887
return;
888
}
889
890
assert(!ctx->tessctrl_shader_saved);
891
ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
892
}
893
894
static void
895
cso_restore_tessctrl_shader(struct cso_context *ctx)
896
{
897
if (!ctx->has_tessellation) {
898
return;
899
}
900
901
if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
902
ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
903
ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
904
}
905
ctx->tessctrl_shader_saved = NULL;
906
}
907
908
void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
909
{
910
assert(ctx->has_tessellation || !handle);
911
912
if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
913
ctx->tesseval_shader = handle;
914
ctx->pipe->bind_tes_state(ctx->pipe, handle);
915
}
916
}
917
918
static void
919
cso_save_tesseval_shader(struct cso_context *ctx)
920
{
921
if (!ctx->has_tessellation) {
922
return;
923
}
924
925
assert(!ctx->tesseval_shader_saved);
926
ctx->tesseval_shader_saved = ctx->tesseval_shader;
927
}
928
929
static void
930
cso_restore_tesseval_shader(struct cso_context *ctx)
931
{
932
if (!ctx->has_tessellation) {
933
return;
934
}
935
936
if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
937
ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
938
ctx->tesseval_shader = ctx->tesseval_shader_saved;
939
}
940
ctx->tesseval_shader_saved = NULL;
941
}
942
943
void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
944
{
945
assert(ctx->has_compute_shader || !handle);
946
947
if (ctx->has_compute_shader && ctx->compute_shader != handle) {
948
ctx->compute_shader = handle;
949
ctx->pipe->bind_compute_state(ctx->pipe, handle);
950
}
951
}
952
953
static void
954
cso_save_compute_shader(struct cso_context *ctx)
955
{
956
if (!ctx->has_compute_shader) {
957
return;
958
}
959
960
assert(!ctx->compute_shader_saved);
961
ctx->compute_shader_saved = ctx->compute_shader;
962
}
963
964
static void
965
cso_restore_compute_shader(struct cso_context *ctx)
966
{
967
if (!ctx->has_compute_shader) {
968
return;
969
}
970
971
if (ctx->compute_shader_saved != ctx->compute_shader) {
972
ctx->pipe->bind_compute_state(ctx->pipe, ctx->compute_shader_saved);
973
ctx->compute_shader = ctx->compute_shader_saved;
974
}
975
ctx->compute_shader_saved = NULL;
976
}
977
978
979
static void
980
cso_save_compute_samplers(struct cso_context *ctx)
981
{
982
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
983
struct sampler_info *saved = &ctx->compute_samplers_saved;
984
985
memcpy(saved->cso_samplers, info->cso_samplers,
986
sizeof(info->cso_samplers));
987
memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
988
}
989
990
991
static void
992
cso_restore_compute_samplers(struct cso_context *ctx)
993
{
994
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
995
struct sampler_info *saved = &ctx->compute_samplers_saved;
996
997
memcpy(info->cso_samplers, saved->cso_samplers,
998
sizeof(info->cso_samplers));
999
memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1000
1001
for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1002
if (info->samplers[i]) {
1003
ctx->max_sampler_seen = i;
1004
break;
1005
}
1006
}
1007
1008
cso_single_sampler_done(ctx, PIPE_SHADER_COMPUTE);
1009
}
1010
1011
1012
static void
1013
cso_set_vertex_elements_direct(struct cso_context *ctx,
1014
const struct cso_velems_state *velems)
1015
{
1016
unsigned key_size, hash_key;
1017
struct cso_hash_iter iter;
1018
void *handle;
1019
1020
/* Need to include the count into the stored state data too.
1021
* Otherwise first few count pipe_vertex_elements could be identical
1022
* even if count is different, and there's no guarantee the hash would
1023
* be different in that case neither.
1024
*/
1025
key_size = sizeof(struct pipe_vertex_element) * velems->count +
1026
sizeof(unsigned);
1027
hash_key = cso_construct_key((void*)velems, key_size);
1028
iter = cso_find_state_template(&ctx->cache, hash_key, CSO_VELEMENTS,
1029
(void*)velems, key_size);
1030
1031
if (cso_hash_iter_is_null(iter)) {
1032
struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
1033
if (!cso)
1034
return;
1035
1036
memcpy(&cso->state, velems, key_size);
1037
cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe,
1038
velems->count,
1039
&cso->state.velems[0]);
1040
1041
iter = cso_insert_state(&ctx->cache, hash_key, CSO_VELEMENTS, cso);
1042
if (cso_hash_iter_is_null(iter)) {
1043
FREE(cso);
1044
return;
1045
}
1046
1047
handle = cso->data;
1048
}
1049
else {
1050
handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
1051
}
1052
1053
if (ctx->velements != handle) {
1054
ctx->velements = handle;
1055
ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
1056
}
1057
}
1058
1059
enum pipe_error
1060
cso_set_vertex_elements(struct cso_context *ctx,
1061
const struct cso_velems_state *velems)
1062
{
1063
struct u_vbuf *vbuf = ctx->vbuf_current;
1064
1065
if (vbuf) {
1066
u_vbuf_set_vertex_elements(vbuf, velems);
1067
return PIPE_OK;
1068
}
1069
1070
cso_set_vertex_elements_direct(ctx, velems);
1071
return PIPE_OK;
1072
}
1073
1074
static void
1075
cso_save_vertex_elements(struct cso_context *ctx)
1076
{
1077
struct u_vbuf *vbuf = ctx->vbuf_current;
1078
1079
if (vbuf) {
1080
u_vbuf_save_vertex_elements(vbuf);
1081
return;
1082
}
1083
1084
assert(!ctx->velements_saved);
1085
ctx->velements_saved = ctx->velements;
1086
}
1087
1088
static void
1089
cso_restore_vertex_elements(struct cso_context *ctx)
1090
{
1091
struct u_vbuf *vbuf = ctx->vbuf_current;
1092
1093
if (vbuf) {
1094
u_vbuf_restore_vertex_elements(vbuf);
1095
return;
1096
}
1097
1098
if (ctx->velements != ctx->velements_saved) {
1099
ctx->velements = ctx->velements_saved;
1100
ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
1101
}
1102
ctx->velements_saved = NULL;
1103
}
1104
1105
/* vertex buffers */
1106
1107
void cso_set_vertex_buffers(struct cso_context *ctx,
1108
unsigned start_slot, unsigned count,
1109
const struct pipe_vertex_buffer *buffers)
1110
{
1111
struct u_vbuf *vbuf = ctx->vbuf_current;
1112
1113
if (!count)
1114
return;
1115
1116
if (vbuf) {
1117
u_vbuf_set_vertex_buffers(vbuf, start_slot, count, 0, false, buffers);
1118
return;
1119
}
1120
1121
struct pipe_context *pipe = ctx->pipe;
1122
pipe->set_vertex_buffers(pipe, start_slot, count, 0, false, buffers);
1123
}
1124
1125
/**
1126
* Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
1127
* for user vertex buffers and user vertex buffers are not set by this call.
1128
* u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
1129
*
1130
* Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1131
* such as VBOs, glBegin/End, and display lists.
1132
*
1133
* Internal operations that do "save states, draw, restore states" shouldn't
1134
* use this, because the states are only saved in either cso_context or
1135
* u_vbuf, not both.
1136
*/
1137
void
cso_set_vertex_buffers_and_elements(struct cso_context *ctx,
                                    const struct cso_velems_state *velems,
                                    unsigned vb_count,
                                    unsigned unbind_trailing_vb_count,
                                    bool take_ownership,
                                    bool uses_user_vertex_buffers,
                                    const struct pipe_vertex_buffer *vbuffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;
   struct pipe_context *pipe = ctx->pipe;

   /* Path 1: u_vbuf handles the vertex state, either because the context
    * always uses it or because user vertex buffers require it.
    */
   if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
      /* Switching from the direct path to u_vbuf? */
      if (!ctx->vbuf_current) {
         /* Unbind all buffers in cso_context, because we'll use u_vbuf. */
         unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
         if (unbind_vb_count)
            pipe->set_vertex_buffers(pipe, 0, 0, unbind_vb_count, false, NULL);

         /* Unset this to make sure the CSO is re-bound on the next use. */
         ctx->velements = NULL;
         ctx->vbuf_current = vbuf;
         /* The unbind above already covered the trailing slots. */
         unbind_trailing_vb_count = 0;
      }

      if (vb_count || unbind_trailing_vb_count) {
         u_vbuf_set_vertex_buffers(vbuf, 0, vb_count,
                                   unbind_trailing_vb_count,
                                   take_ownership, vbuffers);
      }
      u_vbuf_set_vertex_elements(vbuf, velems);
      return;
   }

   /* Path 2: bypass u_vbuf and set the state directly on the driver.
    * Switching from u_vbuf to the direct path?
    */
   if (ctx->vbuf_current) {
      /* Unbind all buffers in u_vbuf, because we'll use cso_context. */
      unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
      if (unbind_vb_count)
         u_vbuf_set_vertex_buffers(vbuf, 0, 0, unbind_vb_count, false, NULL);

      /* Unset this to make sure the CSO is re-bound on the next use. */
      u_vbuf_unset_vertex_elements(vbuf);
      ctx->vbuf_current = NULL;
      /* The unbind above already covered the trailing slots. */
      unbind_trailing_vb_count = 0;
   }

   if (vb_count || unbind_trailing_vb_count) {
      pipe->set_vertex_buffers(pipe, 0, vb_count, unbind_trailing_vb_count,
                               take_ownership, vbuffers);
   }
   cso_set_vertex_elements_direct(ctx, velems);
}
1189
1190
static bool
1191
cso_set_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
1192
unsigned idx, const struct pipe_sampler_state *templ)
1193
{
1194
unsigned key_size = sizeof(struct pipe_sampler_state);
1195
unsigned hash_key = cso_construct_key((void*)templ, key_size);
1196
struct cso_sampler *cso;
1197
struct cso_hash_iter iter =
1198
cso_find_state_template(&ctx->cache,
1199
hash_key, CSO_SAMPLER,
1200
(void *) templ, key_size);
1201
1202
if (cso_hash_iter_is_null(iter)) {
1203
cso = MALLOC(sizeof(struct cso_sampler));
1204
if (!cso)
1205
return false;
1206
1207
memcpy(&cso->state, templ, sizeof(*templ));
1208
cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
1209
cso->hash_key = hash_key;
1210
1211
iter = cso_insert_state(&ctx->cache, hash_key, CSO_SAMPLER, cso);
1212
if (cso_hash_iter_is_null(iter)) {
1213
FREE(cso);
1214
return false;
1215
}
1216
} else {
1217
cso = cso_hash_iter_data(iter);
1218
}
1219
1220
ctx->samplers[shader_stage].cso_samplers[idx] = cso;
1221
ctx->samplers[shader_stage].samplers[idx] = cso->data;
1222
return true;
1223
}
1224
1225
void
1226
cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
1227
unsigned idx, const struct pipe_sampler_state *templ)
1228
{
1229
if (cso_set_sampler(ctx, shader_stage, idx, templ))
1230
ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1231
}
1232
1233
/**
1234
* Send staged sampler state to the driver.
1235
*/
1236
void
1237
cso_single_sampler_done(struct cso_context *ctx,
1238
enum pipe_shader_type shader_stage)
1239
{
1240
struct sampler_info *info = &ctx->samplers[shader_stage];
1241
1242
if (ctx->max_sampler_seen == -1)
1243
return;
1244
1245
ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
1246
ctx->max_sampler_seen + 1,
1247
info->samplers);
1248
ctx->max_sampler_seen = -1;
1249
}
1250
1251
1252
/*
 * If the function encounters any errors (e.g. allocation failures) it
 * skips the affected slot and keeps going, so that as many samplers as
 * possible are set.
 */
1257
void
1258
cso_set_samplers(struct cso_context *ctx,
1259
enum pipe_shader_type shader_stage,
1260
unsigned nr,
1261
const struct pipe_sampler_state **templates)
1262
{
1263
int last = -1;
1264
1265
for (unsigned i = 0; i < nr; i++) {
1266
if (!templates[i])
1267
continue;
1268
1269
/* Reuse the same sampler state CSO if 2 consecutive sampler states
1270
* are identical.
1271
*
1272
* The trivial case where both pointers are equal doesn't occur in
1273
* frequented codepaths.
1274
*
1275
* Reuse rate:
1276
* - Borderlands 2: 55%
1277
* - Hitman: 65%
1278
* - Rocket League: 75%
1279
* - Tomb Raider: 50-65%
1280
* - XCOM 2: 55%
1281
*/
1282
if (last >= 0 &&
1283
!memcmp(templates[i], templates[last],
1284
sizeof(struct pipe_sampler_state))) {
1285
ctx->samplers[shader_stage].cso_samplers[i] =
1286
ctx->samplers[shader_stage].cso_samplers[last];
1287
ctx->samplers[shader_stage].samplers[i] =
1288
ctx->samplers[shader_stage].samplers[last];
1289
} else {
1290
/* Look up the sampler state CSO. */
1291
cso_set_sampler(ctx, shader_stage, i, templates[i]);
1292
}
1293
1294
last = i;
1295
}
1296
1297
ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, last);
1298
cso_single_sampler_done(ctx, shader_stage);
1299
}
1300
1301
static void
1302
cso_save_fragment_samplers(struct cso_context *ctx)
1303
{
1304
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1305
struct sampler_info *saved = &ctx->fragment_samplers_saved;
1306
1307
memcpy(saved->cso_samplers, info->cso_samplers,
1308
sizeof(info->cso_samplers));
1309
memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1310
}
1311
1312
1313
static void
1314
cso_restore_fragment_samplers(struct cso_context *ctx)
1315
{
1316
struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1317
struct sampler_info *saved = &ctx->fragment_samplers_saved;
1318
1319
memcpy(info->cso_samplers, saved->cso_samplers,
1320
sizeof(info->cso_samplers));
1321
memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1322
1323
for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1324
if (info->samplers[i]) {
1325
ctx->max_sampler_seen = i;
1326
break;
1327
}
1328
}
1329
1330
cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
1331
}
1332
1333
1334
void
1335
cso_set_stream_outputs(struct cso_context *ctx,
1336
unsigned num_targets,
1337
struct pipe_stream_output_target **targets,
1338
const unsigned *offsets)
1339
{
1340
struct pipe_context *pipe = ctx->pipe;
1341
uint i;
1342
1343
if (!ctx->has_streamout) {
1344
assert(num_targets == 0);
1345
return;
1346
}
1347
1348
if (ctx->nr_so_targets == 0 && num_targets == 0) {
1349
/* Nothing to do. */
1350
return;
1351
}
1352
1353
/* reference new targets */
1354
for (i = 0; i < num_targets; i++) {
1355
pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1356
}
1357
/* unref extra old targets, if any */
1358
for (; i < ctx->nr_so_targets; i++) {
1359
pipe_so_target_reference(&ctx->so_targets[i], NULL);
1360
}
1361
1362
pipe->set_stream_output_targets(pipe, num_targets, targets,
1363
offsets);
1364
ctx->nr_so_targets = num_targets;
1365
}
1366
1367
static void
1368
cso_save_stream_outputs(struct cso_context *ctx)
1369
{
1370
uint i;
1371
1372
if (!ctx->has_streamout) {
1373
return;
1374
}
1375
1376
ctx->nr_so_targets_saved = ctx->nr_so_targets;
1377
1378
for (i = 0; i < ctx->nr_so_targets; i++) {
1379
assert(!ctx->so_targets_saved[i]);
1380
pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1381
}
1382
}
1383
1384
/* Re-bind the stream-output targets saved by cso_save_stream_outputs(),
 * transferring the saved references back and unbinding any extra targets
 * that were bound in the meantime.
 */
static void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      /* Drop the current target's reference first... */
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* Unreference targets bound beyond the saved count. */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
1419
1420
1421
/**
1422
* Save all the CSO state items specified by the state_mask bitmask
1423
* of CSO_BIT_x flags.
1424
*/
1425
void
1426
cso_save_state(struct cso_context *cso, unsigned state_mask)
1427
{
1428
assert(cso->saved_state == 0);
1429
1430
cso->saved_state = state_mask;
1431
1432
if (state_mask & CSO_BIT_BLEND)
1433
cso_save_blend(cso);
1434
if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1435
cso_save_depth_stencil_alpha(cso);
1436
if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1437
cso_save_fragment_samplers(cso);
1438
if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1439
cso_save_fragment_shader(cso);
1440
if (state_mask & CSO_BIT_FRAMEBUFFER)
1441
cso_save_framebuffer(cso);
1442
if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1443
cso_save_geometry_shader(cso);
1444
if (state_mask & CSO_BIT_MIN_SAMPLES)
1445
cso_save_min_samples(cso);
1446
if (state_mask & CSO_BIT_RASTERIZER)
1447
cso_save_rasterizer(cso);
1448
if (state_mask & CSO_BIT_RENDER_CONDITION)
1449
cso_save_render_condition(cso);
1450
if (state_mask & CSO_BIT_SAMPLE_MASK)
1451
cso_save_sample_mask(cso);
1452
if (state_mask & CSO_BIT_STENCIL_REF)
1453
cso_save_stencil_ref(cso);
1454
if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1455
cso_save_stream_outputs(cso);
1456
if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1457
cso_save_tessctrl_shader(cso);
1458
if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1459
cso_save_tesseval_shader(cso);
1460
if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1461
cso_save_vertex_elements(cso);
1462
if (state_mask & CSO_BIT_VERTEX_SHADER)
1463
cso_save_vertex_shader(cso);
1464
if (state_mask & CSO_BIT_VIEWPORT)
1465
cso_save_viewport(cso);
1466
if (state_mask & CSO_BIT_PAUSE_QUERIES)
1467
cso->pipe->set_active_query_state(cso->pipe, false);
1468
}
1469
1470
1471
/**
1472
* Restore the state which was saved by cso_save_state().
1473
*/
1474
void
1475
cso_restore_state(struct cso_context *cso)
1476
{
1477
unsigned state_mask = cso->saved_state;
1478
1479
assert(state_mask);
1480
1481
if (state_mask & CSO_BIT_BLEND)
1482
cso_restore_blend(cso);
1483
if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1484
cso_restore_depth_stencil_alpha(cso);
1485
if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1486
cso_restore_fragment_samplers(cso);
1487
if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1488
cso_restore_fragment_shader(cso);
1489
if (state_mask & CSO_BIT_FRAMEBUFFER)
1490
cso_restore_framebuffer(cso);
1491
if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1492
cso_restore_geometry_shader(cso);
1493
if (state_mask & CSO_BIT_MIN_SAMPLES)
1494
cso_restore_min_samples(cso);
1495
if (state_mask & CSO_BIT_RASTERIZER)
1496
cso_restore_rasterizer(cso);
1497
if (state_mask & CSO_BIT_RENDER_CONDITION)
1498
cso_restore_render_condition(cso);
1499
if (state_mask & CSO_BIT_SAMPLE_MASK)
1500
cso_restore_sample_mask(cso);
1501
if (state_mask & CSO_BIT_STENCIL_REF)
1502
cso_restore_stencil_ref(cso);
1503
if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1504
cso_restore_stream_outputs(cso);
1505
if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1506
cso_restore_tessctrl_shader(cso);
1507
if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1508
cso_restore_tesseval_shader(cso);
1509
if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1510
cso_restore_vertex_elements(cso);
1511
if (state_mask & CSO_BIT_VERTEX_SHADER)
1512
cso_restore_vertex_shader(cso);
1513
if (state_mask & CSO_BIT_VIEWPORT)
1514
cso_restore_viewport(cso);
1515
if (state_mask & CSO_BIT_PAUSE_QUERIES)
1516
cso->pipe->set_active_query_state(cso->pipe, true);
1517
1518
cso->saved_state = 0;
1519
}
1520
1521
/**
1522
* Save all the CSO state items specified by the state_mask bitmask
1523
* of CSO_BIT_COMPUTE_x flags.
1524
*/
1525
void
1526
cso_save_compute_state(struct cso_context *cso, unsigned state_mask)
1527
{
1528
assert(cso->saved_compute_state == 0);
1529
1530
cso->saved_compute_state = state_mask;
1531
1532
if (state_mask & CSO_BIT_COMPUTE_SHADER)
1533
cso_save_compute_shader(cso);
1534
1535
if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1536
cso_save_compute_samplers(cso);
1537
}
1538
1539
1540
/**
1541
* Restore the state which was saved by cso_save_compute_state().
1542
*/
1543
void
1544
cso_restore_compute_state(struct cso_context *cso)
1545
{
1546
unsigned state_mask = cso->saved_compute_state;
1547
1548
assert(state_mask);
1549
1550
if (state_mask & CSO_BIT_COMPUTE_SHADER)
1551
cso_restore_compute_shader(cso);
1552
1553
if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1554
cso_restore_compute_samplers(cso);
1555
1556
cso->saved_compute_state = 0;
1557
}
1558
1559
1560
1561
/* drawing */
1562
1563
void
1564
cso_draw_vbo(struct cso_context *cso,
1565
const struct pipe_draw_info *info,
1566
unsigned drawid_offset,
1567
const struct pipe_draw_indirect_info *indirect,
1568
const struct pipe_draw_start_count_bias draw)
1569
{
1570
struct u_vbuf *vbuf = cso->vbuf_current;
1571
1572
/* We can't have both indirect drawing and SO-vertex-count drawing */
1573
assert(!indirect ||
1574
indirect->buffer == NULL ||
1575
indirect->count_from_stream_output == NULL);
1576
1577
/* We can't have SO-vertex-count drawing with an index buffer */
1578
assert(info->index_size == 0 ||
1579
!indirect ||
1580
indirect->count_from_stream_output == NULL);
1581
1582
if (vbuf) {
1583
u_vbuf_draw_vbo(vbuf, info, drawid_offset, indirect, draw);
1584
} else {
1585
struct pipe_context *pipe = cso->pipe;
1586
pipe->draw_vbo(pipe, info, drawid_offset, indirect, &draw, 1);
1587
}
1588
}
1589
1590
/* info->draw_id can be changed by the callee if increment_draw_id is true. */
1591
void
1592
cso_multi_draw(struct cso_context *cso,
1593
struct pipe_draw_info *info,
1594
unsigned drawid_offset,
1595
const struct pipe_draw_start_count_bias *draws,
1596
unsigned num_draws)
1597
{
1598
struct u_vbuf *vbuf = cso->vbuf_current;
1599
1600
if (vbuf) {
1601
/* Increase refcount to be able to use take_index_buffer_ownership with
1602
* all draws.
1603
*/
1604
if (num_draws > 1 && info->take_index_buffer_ownership)
1605
p_atomic_add(&info->index.resource->reference.count, num_draws - 1);
1606
1607
unsigned drawid = drawid_offset;
1608
for (unsigned i = 0; i < num_draws; i++) {
1609
u_vbuf_draw_vbo(vbuf, info, drawid, NULL, draws[i]);
1610
1611
if (info->increment_draw_id)
1612
drawid++;
1613
}
1614
} else {
1615
struct pipe_context *pipe = cso->pipe;
1616
1617
pipe->draw_vbo(pipe, info, drawid_offset, NULL, draws, num_draws);
1618
}
1619
}
1620
1621
void
1622
cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1623
{
1624
struct pipe_draw_info info;
1625
struct pipe_draw_start_count_bias draw;
1626
1627
util_draw_init_info(&info);
1628
1629
info.mode = mode;
1630
info.index_bounds_valid = true;
1631
info.min_index = start;
1632
info.max_index = start + count - 1;
1633
1634
draw.start = start;
1635
draw.count = count;
1636
draw.index_bias = 0;
1637
1638
cso_draw_vbo(cso, &info, 0, NULL, draw);
1639
}
1640
1641
void
1642
cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1643
uint start, uint count,
1644
uint start_instance, uint instance_count)
1645
{
1646
struct pipe_draw_info info;
1647
struct pipe_draw_start_count_bias draw;
1648
1649
util_draw_init_info(&info);
1650
1651
info.mode = mode;
1652
info.index_bounds_valid = true;
1653
info.min_index = start;
1654
info.max_index = start + count - 1;
1655
info.start_instance = start_instance;
1656
info.instance_count = instance_count;
1657
1658
draw.start = start;
1659
draw.count = count;
1660
draw.index_bias = 0;
1661
1662
cso_draw_vbo(cso, &info, 0, NULL, draw);
1663
}
1664
1665