GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/auxiliary/util/u_inlines.h
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/compiler.h"
#include "util/format/u_format.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif

/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   dst->count = count;
}

static inline boolean
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         ASSERTED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline boolean
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
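
/* Usage sketch: drivers typically wrap pipe_reference() in a per-type
 * "reference" helper with the same shape as pipe_surface_reference() below.
 * The names my_object and my_object_destroy are hypothetical, purely for
 * illustration:
 *
 *    static void
 *    my_object_reference(struct my_object **dst, struct my_object *src)
 *    {
 *       if (pipe_reference(*dst ? &(*dst)->reference : NULL,
 *                          src ? &src->reference : NULL))
 *          my_object_destroy(*dst);      refcount hit zero
 *       *dst = src;
 *    }
 */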

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference() but always set the pointer to NULL
 * and pass in an explicit context. The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a surface
 * that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}

static inline void
pipe_resource_destroy(struct pipe_resource *res)
{
   /* Avoid recursion, which would prevent inlining this function */
   do {
      struct pipe_resource *next = res->next;

      res->screen->resource_destroy(res->screen, res);
      res = next;
   } while (pipe_reference_described(res ? &res->reference : NULL,
                                     NULL,
                                     (debug_reference_descriptor)
                                     debug_describe_resource));
}

static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      pipe_resource_destroy(old_dst);
   }
   *dst = src;
}
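
/* Usage sketch: a holder keeps at most one reference per pointer and uses
 * pipe_resource_reference() both to take and to drop it; passing NULL as the
 * source releases the old reference (and may destroy the resource).  The
 * variables held and res are hypothetical:
 *
 *    struct pipe_resource *held = NULL;
 *    pipe_resource_reference(&held, res);    take a reference to res
 *    ...
 *    pipe_resource_reference(&held, NULL);   drop it again
 */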

/**
 * Subtract the given number of references.
 */
static inline void
pipe_drop_resource_references(struct pipe_resource *dst, int num_refs)
{
   int count = p_atomic_add_return(&dst->reference.count, -num_refs);

   assert(count >= 0);
   /* Underflows shouldn't happen, but let's be safe. */
   if (count <= 0)
      pipe_resource_destroy(dst);
}

/**
 * Same as pipe_surface_release, but used when pipe_context doesn't exist
 * anymore.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in
 * the same context (if they exist), and that this must be the current context.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   if (dst->buffer.resource == src->buffer.resource) {
      /* Just copy the fields, don't touch reference counts. */
      dst->stride = src->stride;
      dst->is_user_buffer = src->is_user_buffer;
      dst->buffer_offset = src->buffer_offset;
      return;
   }

   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = 0;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
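
/* Usage sketch: creating a device-local vertex buffer of size bytes with the
 * helper above; PIPE_BIND_VERTEX_BUFFER and PIPE_USAGE_DEFAULT are ordinary
 * p_defines.h values, while screen and size are hypothetical here:
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, size);
 */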


static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_MAP_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->buffer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}


/**
 * Map whole resource.
 * \param access  bitmask of PIPE_MAP_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->buffer_unmap(pipe, transfer);
}
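
/* Usage sketch: a typical CPU write through the map/unmap pair above;
 * pipe_buffer_read() further down follows the same pattern for reads.
 * The variables pipe, buf, data and size are hypothetical:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, 0, size,
 *                                      PIPE_MAP_WRITE, &xfer);
 *    if (map) {
 *       memcpy(map, data, size);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */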

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_MAP_WRITE |
                         PIPE_MAP_UNSYNCHRONIZED),
                        offset, size, data);
}

/**
 * Utility for simplifying pipe_context::resource_copy_region calls
 */
static inline void
pipe_buffer_copy(struct pipe_context *pipe,
                 struct pipe_resource *dst,
                 struct pipe_resource *src,
                 unsigned dst_offset,
                 unsigned src_offset,
                 unsigned size)
{
   struct pipe_box box;
   /* only these fields are used */
   box.x = (int)src_offset;
   box.width = (int)size;
   pipe->resource_copy_region(pipe, dst, 0, dst_offset, 0, 0, src, 0, &box);
}


/**
 * Create a new resource and immediately put data into it
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  bitmask of PIPE_USAGE_x flags
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_MAP_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map(struct pipe_context *context,
                 struct pipe_resource *resource,
                 unsigned level, unsigned layer,
                 unsigned access,
                 unsigned x, unsigned y,
                 unsigned w, unsigned h,
                 struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map_3d(struct pipe_context *context,
                    struct pipe_resource *resource,
                    unsigned level,
                    unsigned access,
                    unsigned x, unsigned y, unsigned z,
                    unsigned w, unsigned h, unsigned d,
                    struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}

static inline void
pipe_texture_unmap(struct pipe_context *context,
                   struct pipe_transfer *transfer)
{
   context->texture_unmap(context, transfer);
}
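
/* Usage sketch: reading back one level/layer of a texture with the helpers
 * above; rows of the mapping are transfer->stride bytes apart.  The names
 * ctx, tex, w and h are hypothetical:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_texture_map(ctx, tex, 0, 0, PIPE_MAP_READ,
 *                                 0, 0, w, h, &xfer);
 *    if (map) {
 *       ... copy h rows, stepping by xfer->stride ...
 *       pipe_texture_unmap(ctx, xfer);
 *    }
 */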

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, false, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, false, NULL);
   }
}
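
/* Usage sketch: binding a whole buffer as constant buffer slot 0 of the
 * fragment shader and later unbinding it; pipe and buf are hypothetical:
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, buf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */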


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch(fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src,
                          bool take_ownership)
{
   if (src) {
      if (take_ownership) {
         pipe_resource_reference(&dst->buffer, NULL);
         dst->buffer = src->buffer;
      } else {
         pipe_resource_reference(&dst->buffer, src->buffer);
      }
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
                        const struct pipe_shader_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->shader_access = src->shader_access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      dst->shader_access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      FALLTHROUGH;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

/**
 * Returns true if the blit will fully initialize all pixels in the resource.
 */
static inline bool
util_blit_covers_whole_resource(const struct pipe_blit_info *info)
{
   /* No conditional rendering or scissoring. (We assume that the caller would
    * have dropped any redundant scissoring)
    */
   if (info->scissor_enable || info->window_rectangle_include ||
       info->render_condition_enable || info->alpha_blend)
      return false;

   const struct pipe_resource *dst = info->dst.resource;
   /* A single blit can't initialize a miptree. */
   if (dst->last_level != 0)
      return false;

   assert(info->dst.level == 0);

   /* Make sure the dst box covers the whole resource. */
   if (!(util_texrange_covers_whole_level(dst, 0,
                                          0, 0, 0,
                                          info->dst.box.width, info->dst.box.height, info->dst.box.depth))) {
      return false;
   }

   /* Make sure the mask actually updates all the channels present in the dst format. */
   if (info->mask & PIPE_MASK_RGBA) {
      if ((info->mask & PIPE_MASK_RGBA) != PIPE_MASK_RGBA)
         return false;
   }

   if (info->mask & PIPE_MASK_ZS) {
      const struct util_format_description *format_desc = util_format_description(info->dst.format);
      uint32_t dst_has = 0;
      if (util_format_has_depth(format_desc))
         dst_has |= PIPE_MASK_Z;
      if (util_format_has_stencil(format_desc))
         dst_has |= PIPE_MASK_S;
      if (dst_has & ~(info->mask & PIPE_MASK_ZS))
         return false;
   }

   return true;
}

static inline bool
util_logicop_reads_dest(enum pipe_logicop op)
{
   switch (op) {
   case PIPE_LOGICOP_NOR:
   case PIPE_LOGICOP_AND_INVERTED:
   case PIPE_LOGICOP_AND_REVERSE:
   case PIPE_LOGICOP_INVERT:
   case PIPE_LOGICOP_XOR:
   case PIPE_LOGICOP_NAND:
   case PIPE_LOGICOP_AND:
   case PIPE_LOGICOP_EQUIV:
   case PIPE_LOGICOP_NOOP:
   case PIPE_LOGICOP_OR_INVERTED:
   case PIPE_LOGICOP_OR_REVERSE:
   case PIPE_LOGICOP_OR:
      return true;
   case PIPE_LOGICOP_CLEAR:
   case PIPE_LOGICOP_COPY_INVERTED:
   case PIPE_LOGICOP_COPY:
   case PIPE_LOGICOP_SET:
      return false;
   }
   unreachable("bad logicop");
}

static inline bool
util_writes_stencil(const struct pipe_stencil_state *s)
{
   return s->enabled && s->writemask &&
          ((s->fail_op != PIPE_STENCIL_OP_KEEP) ||
           (s->zpass_op != PIPE_STENCIL_OP_KEEP) ||
           (s->zfail_op != PIPE_STENCIL_OP_KEEP));
}

static inline bool
util_writes_depth_stencil(const struct pipe_depth_stencil_alpha_state *zsa)
{
   if (zsa->depth_enabled && zsa->depth_writemask &&
       (zsa->depth_func != PIPE_FUNC_NEVER))
      return true;

   return util_writes_stencil(&zsa->stencil[0]) ||
          util_writes_stencil(&zsa->stencil[1]);
}

static inline struct pipe_context *
pipe_create_multimedia_context(struct pipe_screen *screen)
{
   unsigned flags = 0;

   if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
      flags |= PIPE_CONTEXT_COMPUTE_ONLY;

   return screen->context_create(screen, NULL, flags);
}

static inline unsigned util_res_sample_count(struct pipe_resource *res)
{
   return res->nr_samples > 0 ? res->nr_samples : 1;
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */