Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/include/pipe/p_context.h
4566 views
1
/**************************************************************************
2
*
3
* Copyright 2007 VMware, Inc.
4
* All Rights Reserved.
5
*
6
* Permission is hereby granted, free of charge, to any person obtaining a
7
* copy of this software and associated documentation files (the
8
* "Software"), to deal in the Software without restriction, including
9
* without limitation the rights to use, copy, modify, merge, publish,
10
* distribute, sub license, and/or sell copies of the Software, and to
11
* permit persons to whom the Software is furnished to do so, subject to
12
* the following conditions:
13
*
14
* The above copyright notice and this permission notice (including the
15
* next paragraph) shall be included in all copies or substantial portions
16
* of the Software.
17
*
18
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
*
26
**************************************************************************/
27
28
#ifndef PIPE_CONTEXT_H
29
#define PIPE_CONTEXT_H
30
31
#include "p_compiler.h"
32
#include "p_format.h"
33
#include "p_video_enums.h"
34
#include "p_defines.h"
35
#include <stdio.h>
36
37
#ifdef __cplusplus
38
extern "C" {
39
#endif
40
41
42
/* Forward declarations: pipe_context only ever stores or passes pointers to
 * these, so incomplete types are sufficient and keep header coupling minimal.
 */

/* CSO / fixed-function state */
struct pipe_blend_color;
struct pipe_blend_state;
struct pipe_blit_info;
struct pipe_box;
struct pipe_clip_state;
struct pipe_constant_buffer;
struct pipe_debug_callback;
struct pipe_depth_stencil_alpha_state;
struct pipe_device_reset_callback;

/* Draw / dispatch parameters */
struct pipe_draw_info;
struct pipe_draw_indirect_info;
struct pipe_draw_start_count_bias;
struct pipe_grid_info;

/* Objects and views */
struct pipe_fence_handle;
struct pipe_framebuffer_state;
struct pipe_image_view;
struct pipe_query;
struct pipe_poly_stipple;
struct pipe_rasterizer_state;
struct pipe_resolve_info;
struct pipe_resource;
struct pipe_sampler_state;
struct pipe_sampler_view;
struct pipe_scissor_state;
struct pipe_shader_buffer;
struct pipe_shader_state;
struct pipe_stencil_ref;
struct pipe_stream_output_target;
struct pipe_surface;
struct pipe_transfer;
struct pipe_vertex_buffer;
struct pipe_vertex_element;

/* Video and compute */
struct pipe_video_buffer;
struct pipe_video_codec;
struct pipe_viewport_state;
struct pipe_compute_state;

/* Unions and utility helpers */
union pipe_color_union;
union pipe_query_result;
struct u_log_context;
struct u_upload_mgr;
82
83
/**
84
* Gallium rendering context. Basically:
85
* - state setting functions
86
* - VBO drawing functions
87
* - surface functions
88
*/
89
struct pipe_context {
90
struct pipe_screen *screen;
91
92
void *priv; /**< context private data (for DRI for example) */
93
void *draw; /**< private, for draw module (temporary?) */
94
95
/**
96
* Stream uploaders created by the driver. All drivers, gallium frontends, and
97
* modules should use them.
98
*
99
* Use u_upload_alloc or u_upload_data as many times as you want.
100
* Once you are done, use u_upload_unmap.
101
*/
102
struct u_upload_mgr *stream_uploader; /* everything but shader constants */
103
struct u_upload_mgr *const_uploader; /* shader constants only */
104
105
void (*destroy)( struct pipe_context * );
106
107
/**
108
* VBO drawing
109
*/
110
/*@{*/
111
/**
112
* Multi draw.
113
*
114
* For indirect multi draws, num_draws is 1 and indirect->draw_count
115
* is used instead.
116
*
117
* Caps:
118
* - Always supported: Direct multi draws
119
* - PIPE_CAP_MULTI_DRAW_INDIRECT: Indirect multi draws
120
* - PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS: Indirect draw count
121
*
122
* Differences against glMultiDraw and glMultiMode:
123
* - "info->mode" and "draws->index_bias" are always constant due to the lack
124
* of hardware support and CPU performance concerns. Only start and count
125
* vary.
126
* - if "info->increment_draw_id" is false, draw_id doesn't change between
127
* draws
128
*
129
* Direct multi draws are also generated by u_threaded_context, which looks
130
* ahead in gallium command buffers and merges single draws.
131
*
132
* \param pipe context
133
* \param info draw info
134
* \param drawid_offset offset to add for drawid param of each draw
135
* \param indirect indirect multi draws
136
* \param draws array of (start, count) pairs for direct draws
137
* \param num_draws number of direct draws; 1 for indirect multi draws
138
*/
139
void (*draw_vbo)(struct pipe_context *pipe,
140
const struct pipe_draw_info *info,
141
unsigned drawid_offset,
142
const struct pipe_draw_indirect_info *indirect,
143
const struct pipe_draw_start_count_bias *draws,
144
unsigned num_draws);
145
/*@}*/
146
147
/**
148
* Predicate subsequent rendering on occlusion query result
149
* \param query the query predicate, or NULL if no predicate
150
* \param condition whether to skip on FALSE or TRUE query results
151
* \param mode one of PIPE_RENDER_COND_x
152
*/
153
void (*render_condition)( struct pipe_context *pipe,
154
struct pipe_query *query,
155
bool condition,
156
enum pipe_render_cond_flag mode );
157
158
/**
159
* Predicate subsequent rendering on a value in a buffer
160
* \param buffer The buffer to query for the value
161
* \param offset Offset in the buffer to query 32-bit
162
* \param condition whether to skip on FALSE or TRUE query results
163
*/
164
void (*render_condition_mem)( struct pipe_context *pipe,
165
struct pipe_resource *buffer,
166
uint32_t offset,
167
bool condition );
168
/**
169
* Query objects
170
*/
171
/*@{*/
172
struct pipe_query *(*create_query)( struct pipe_context *pipe,
173
unsigned query_type,
174
unsigned index );
175
176
/**
177
* Create a query object that queries all given query types simultaneously.
178
*
179
* This can only be used for those query types for which
180
* get_driver_query_info indicates that it must be used. Only one batch
181
* query object may be active at a time.
182
*
183
* There may be additional constraints on which query types can be used
184
* together, in particular those that are implied by
185
* get_driver_query_group_info.
186
*
187
* \param num_queries the number of query types
188
* \param query_types array of \p num_queries query types
189
* \return a query object, or NULL on error.
190
*/
191
struct pipe_query *(*create_batch_query)( struct pipe_context *pipe,
192
unsigned num_queries,
193
unsigned *query_types );
194
195
void (*destroy_query)(struct pipe_context *pipe,
196
struct pipe_query *q);
197
198
bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
199
bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
200
201
/**
202
* Get results of a query.
203
* \param wait if true, this query will block until the result is ready
204
* \return TRUE if results are ready, FALSE otherwise
205
*/
206
bool (*get_query_result)(struct pipe_context *pipe,
207
struct pipe_query *q,
208
bool wait,
209
union pipe_query_result *result);
210
211
/**
212
* Get results of a query, storing into resource. Note that this may not
213
* be used with batch queries.
214
*
215
* \param wait if true, this query will block until the result is ready
216
* \param result_type the type of the value being stored:
217
* \param index for queries that return multiple pieces of data, which
218
* item of that data to store (e.g. for
219
* PIPE_QUERY_PIPELINE_STATISTICS).
220
* When the index is -1, instead of the value of the query
221
* the driver should instead write a 1 or 0 to the appropriate
222
* location with 1 meaning that the query result is available.
223
*/
224
void (*get_query_result_resource)(struct pipe_context *pipe,
225
struct pipe_query *q,
226
bool wait,
227
enum pipe_query_value_type result_type,
228
int index,
229
struct pipe_resource *resource,
230
unsigned offset);
231
232
/**
233
* Set whether all current non-driver queries except TIME_ELAPSED are
234
* active or paused.
235
*/
236
void (*set_active_query_state)(struct pipe_context *pipe, bool enable);
237
238
/**
239
* INTEL Performance Query
240
*/
241
/*@{*/
242
243
unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);
244
245
void (*get_intel_perf_query_info)(struct pipe_context *pipe,
246
unsigned query_index,
247
const char **name,
248
uint32_t *data_size,
249
uint32_t *n_counters,
250
uint32_t *n_active);
251
252
void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
253
unsigned query_index,
254
unsigned counter_index,
255
const char **name,
256
const char **desc,
257
uint32_t *offset,
258
uint32_t *data_size,
259
uint32_t *type_enum,
260
uint32_t *data_type_enum,
261
uint64_t *raw_max);
262
263
struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
264
unsigned query_index);
265
266
bool (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
267
268
void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
269
270
void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
271
272
void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);
273
274
bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);
275
276
bool (*get_intel_perf_query_data)(struct pipe_context *pipe,
277
struct pipe_query *q,
278
size_t data_size,
279
uint32_t *data,
280
uint32_t *bytes_written);
281
282
/*@}*/
283
284
/**
285
* State functions (create/bind/destroy state objects)
286
*/
287
/*@{*/
288
void * (*create_blend_state)(struct pipe_context *,
289
const struct pipe_blend_state *);
290
void (*bind_blend_state)(struct pipe_context *, void *);
291
void (*delete_blend_state)(struct pipe_context *, void *);
292
293
void * (*create_sampler_state)(struct pipe_context *,
294
const struct pipe_sampler_state *);
295
void (*bind_sampler_states)(struct pipe_context *,
296
enum pipe_shader_type shader,
297
unsigned start_slot, unsigned num_samplers,
298
void **samplers);
299
void (*delete_sampler_state)(struct pipe_context *, void *);
300
301
void * (*create_rasterizer_state)(struct pipe_context *,
302
const struct pipe_rasterizer_state *);
303
void (*bind_rasterizer_state)(struct pipe_context *, void *);
304
void (*delete_rasterizer_state)(struct pipe_context *, void *);
305
306
void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
307
const struct pipe_depth_stencil_alpha_state *);
308
void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
309
void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);
310
311
void * (*create_fs_state)(struct pipe_context *,
312
const struct pipe_shader_state *);
313
void (*bind_fs_state)(struct pipe_context *, void *);
314
void (*delete_fs_state)(struct pipe_context *, void *);
315
316
void * (*create_vs_state)(struct pipe_context *,
317
const struct pipe_shader_state *);
318
void (*bind_vs_state)(struct pipe_context *, void *);
319
void (*delete_vs_state)(struct pipe_context *, void *);
320
321
void * (*create_gs_state)(struct pipe_context *,
322
const struct pipe_shader_state *);
323
void (*bind_gs_state)(struct pipe_context *, void *);
324
void (*delete_gs_state)(struct pipe_context *, void *);
325
326
void * (*create_tcs_state)(struct pipe_context *,
327
const struct pipe_shader_state *);
328
void (*bind_tcs_state)(struct pipe_context *, void *);
329
void (*delete_tcs_state)(struct pipe_context *, void *);
330
331
void * (*create_tes_state)(struct pipe_context *,
332
const struct pipe_shader_state *);
333
void (*bind_tes_state)(struct pipe_context *, void *);
334
void (*delete_tes_state)(struct pipe_context *, void *);
335
336
void * (*create_vertex_elements_state)(struct pipe_context *,
337
unsigned num_elements,
338
const struct pipe_vertex_element *);
339
void (*bind_vertex_elements_state)(struct pipe_context *, void *);
340
void (*delete_vertex_elements_state)(struct pipe_context *, void *);
341
342
/*@}*/
343
344
/**
345
* Parameter-like state (or properties)
346
*/
347
/*@{*/
348
void (*set_blend_color)( struct pipe_context *,
349
const struct pipe_blend_color * );
350
351
void (*set_stencil_ref)( struct pipe_context *,
352
const struct pipe_stencil_ref ref);
353
354
void (*set_sample_mask)( struct pipe_context *,
355
unsigned sample_mask );
356
357
void (*set_min_samples)( struct pipe_context *,
358
unsigned min_samples );
359
360
void (*set_clip_state)( struct pipe_context *,
361
const struct pipe_clip_state * );
362
363
/**
364
* Set constant buffer
365
*
366
* \param shader Shader stage
367
* \param index Buffer binding slot index within a shader stage
368
* \param take_ownership The callee takes ownership of the buffer reference.
369
* (the callee shouldn't increment the ref count)
370
* \param buf Constant buffer parameters
371
*/
372
void (*set_constant_buffer)( struct pipe_context *,
373
enum pipe_shader_type shader, uint index,
374
bool take_ownership,
375
const struct pipe_constant_buffer *buf );
376
377
/**
378
* Set inlinable constants for constant buffer 0.
379
*
380
* These are constants that the driver would like to inline in the IR
381
* of the current shader and recompile it. Drivers can determine which
382
* constants they prefer to inline in finalize_nir and store that
383
* information in shader_info::*inlinable_uniform*. When the state tracker
384
* or frontend uploads constants to a constant buffer, it can pass
385
* inlinable constants separately via this call.
386
*
387
* Any set_constant_buffer call invalidates this state, so this function
388
* must be called after it. Binding a shader also invalidates this state.
389
*
390
* There is no PIPE_CAP for this. Drivers shouldn't set the shader_info
391
* fields if they don't want this or if they don't implement this.
392
*/
393
void (*set_inlinable_constants)( struct pipe_context *,
394
enum pipe_shader_type shader,
395
uint num_values, uint32_t *values );
396
397
void (*set_framebuffer_state)( struct pipe_context *,
398
const struct pipe_framebuffer_state * );
399
400
/**
401
* Set the sample locations used during rasterization. When NULL or sized
402
* zero, the default locations are used.
403
*
404
* Note that get_sample_position() still returns the default locations.
405
*
406
* The samples are accessed with
407
* locations[(pixel_y*grid_w+pixel_x)*ms+i],
408
* where:
409
* ms = the sample count
410
* grid_w = the pixel grid width for the sample count
411
* grid_h = the pixel grid height for the sample count
412
* pixel_x = the window x coordinate modulo grid_w
413
* pixel_y = the window y coordinate modulo grid_h
414
* i = the sample index
415
* This gives a result with the x coordinate as the low 4 bits and the y
416
* coordinate as the high 4 bits. For each coordinate 0 is the left or top
417
* edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge.
418
*
419
* Out of bounds accesses return undefined values.
420
*
421
* The pixel grid is used to vary sample locations across pixels and its
422
* size can be queried with get_sample_pixel_grid().
423
*/
424
void (*set_sample_locations)( struct pipe_context *,
425
size_t size, const uint8_t *locations );
426
427
void (*set_polygon_stipple)( struct pipe_context *,
428
const struct pipe_poly_stipple * );
429
430
void (*set_scissor_states)( struct pipe_context *,
431
unsigned start_slot,
432
unsigned num_scissors,
433
const struct pipe_scissor_state * );
434
435
void (*set_window_rectangles)( struct pipe_context *,
436
bool include,
437
unsigned num_rectangles,
438
const struct pipe_scissor_state * );
439
440
void (*set_viewport_states)( struct pipe_context *,
441
unsigned start_slot,
442
unsigned num_viewports,
443
const struct pipe_viewport_state *);
444
445
void (*set_sampler_views)(struct pipe_context *,
446
enum pipe_shader_type shader,
447
unsigned start_slot, unsigned num_views,
448
unsigned unbind_num_trailing_slots,
449
struct pipe_sampler_view **views);
450
451
void (*set_tess_state)(struct pipe_context *,
452
const float default_outer_level[4],
453
const float default_inner_level[2]);
454
455
/**
456
* Sets the debug callback. If the pointer is null, then no callback is
457
* set, otherwise a copy of the data should be made.
458
*/
459
void (*set_debug_callback)(struct pipe_context *,
460
const struct pipe_debug_callback *);
461
462
/**
463
* Bind an array of shader buffers that will be used by a shader.
464
* Any buffers that were previously bound to the specified range
465
* will be unbound.
466
*
467
* \param shader selects shader stage
468
* \param start_slot first buffer slot to bind.
469
* \param count number of consecutive buffers to bind.
470
* \param buffers array of pointers to the buffers to bind, it
471
* should contain at least \a count elements
472
* unless it's NULL, in which case no buffers will
473
* be bound.
474
* \param writable_bitmask If bit i is not set, buffers[i] will only be
475
* used with loads. If unsure, set to ~0.
476
*/
477
void (*set_shader_buffers)(struct pipe_context *,
478
enum pipe_shader_type shader,
479
unsigned start_slot, unsigned count,
480
const struct pipe_shader_buffer *buffers,
481
unsigned writable_bitmask);
482
483
/**
484
* Bind an array of hw atomic buffers for use by all shaders.
485
* Any buffers that were previously bound to the specified range
486
* will be unbound.
487
*
488
* \param start_slot first buffer slot to bind.
489
* \param count number of consecutive buffers to bind.
490
* \param buffers array of pointers to the buffers to bind, it
491
* should contain at least \a count elements
492
* unless it's NULL, in which case no buffers will
493
* be bound.
494
*/
495
void (*set_hw_atomic_buffers)(struct pipe_context *,
496
unsigned start_slot, unsigned count,
497
const struct pipe_shader_buffer *buffers);
498
499
/**
500
* Bind an array of images that will be used by a shader.
501
* Any images that were previously bound to the specified range
502
* will be unbound.
503
*
504
* \param shader selects shader stage
505
* \param start_slot first image slot to bind.
506
* \param count number of consecutive images to bind.
507
* \param unbind_num_trailing_slots number of images to unbind after
508
* the bound slot
509
* \param buffers array of the images to bind, it
510
* should contain at least \a count elements
511
* unless it's NULL, in which case no images will
512
* be bound.
513
*/
514
void (*set_shader_images)(struct pipe_context *,
515
enum pipe_shader_type shader,
516
unsigned start_slot, unsigned count,
517
unsigned unbind_num_trailing_slots,
518
const struct pipe_image_view *images);
519
520
/**
521
* Bind an array of vertex buffers to the specified slots.
522
*
523
* \param start_slot first vertex buffer slot
524
* \param num_buffers number of consecutive vertex buffers to bind.
525
* \param unbind_num_trailing_slots unbind slots after the bound slots
526
* \param take_ownership the caller holds buffer references and they
527
* should be taken over by the callee. This means
528
* that drivers shouldn't increment reference counts.
529
* \param buffers array of the buffers to bind
530
*/
531
void (*set_vertex_buffers)( struct pipe_context *,
532
unsigned start_slot,
533
unsigned num_buffers,
534
unsigned unbind_num_trailing_slots,
535
bool take_ownership,
536
const struct pipe_vertex_buffer * );
537
538
/*@}*/
539
540
/**
541
* Stream output functions.
542
*/
543
/*@{*/
544
545
struct pipe_stream_output_target *(*create_stream_output_target)(
546
struct pipe_context *,
547
struct pipe_resource *,
548
unsigned buffer_offset,
549
unsigned buffer_size);
550
551
void (*stream_output_target_destroy)(struct pipe_context *,
552
struct pipe_stream_output_target *);
553
554
void (*set_stream_output_targets)(struct pipe_context *,
555
unsigned num_targets,
556
struct pipe_stream_output_target **targets,
557
const unsigned *offsets);
558
559
uint32_t (*stream_output_target_offset)(struct pipe_stream_output_target *target);
560
561
/*@}*/
562
563
564
/**
565
* INTEL_blackhole_render
566
*/
567
/*@{*/
568
569
void (*set_frontend_noop)(struct pipe_context *,
570
bool enable);
571
572
/*@}*/
573
574
575
/**
576
* Resource functions for blit-like functionality
577
*
578
* If a driver supports multisampling, blit must implement color resolve.
579
*/
580
/*@{*/
581
582
/**
583
* Copy a block of pixels from one resource to another.
584
* The two resources must be of the same format.
585
* Resources with nr_samples > 1 are not allowed.
586
*/
587
void (*resource_copy_region)(struct pipe_context *pipe,
588
struct pipe_resource *dst,
589
unsigned dst_level,
590
unsigned dstx, unsigned dsty, unsigned dstz,
591
struct pipe_resource *src,
592
unsigned src_level,
593
const struct pipe_box *src_box);
594
595
/* Optimal hardware path for blitting pixels.
596
* Scaling, format conversion, up- and downsampling (resolve) are allowed.
597
*/
598
void (*blit)(struct pipe_context *pipe,
599
const struct pipe_blit_info *info);
600
601
/*@}*/
602
603
/**
604
* Clear the specified set of currently bound buffers to specified values.
605
* The entire buffers are cleared (no scissor, no colormask, etc).
606
*
607
* \param buffers bitfield of PIPE_CLEAR_* values.
608
* \param scissor_state the scissored region to clear
609
* \param color pointer to a union of fiu array for each of r, g, b, a.
610
* \param depth depth clear value in [0,1].
611
* \param stencil stencil clear value
612
*/
613
void (*clear)(struct pipe_context *pipe,
614
unsigned buffers,
615
const struct pipe_scissor_state *scissor_state,
616
const union pipe_color_union *color,
617
double depth,
618
unsigned stencil);
619
620
/**
621
* Clear a color rendertarget surface.
622
* \param color pointer to a union of fiu array for each of r, g, b, a.
623
*/
624
void (*clear_render_target)(struct pipe_context *pipe,
625
struct pipe_surface *dst,
626
const union pipe_color_union *color,
627
unsigned dstx, unsigned dsty,
628
unsigned width, unsigned height,
629
bool render_condition_enabled);
630
631
/**
632
* Clear a depth-stencil surface.
633
* \param clear_flags bitfield of PIPE_CLEAR_DEPTH/STENCIL values.
634
* \param depth depth clear value in [0,1].
635
* \param stencil stencil clear value
636
*/
637
void (*clear_depth_stencil)(struct pipe_context *pipe,
638
struct pipe_surface *dst,
639
unsigned clear_flags,
640
double depth,
641
unsigned stencil,
642
unsigned dstx, unsigned dsty,
643
unsigned width, unsigned height,
644
bool render_condition_enabled);
645
646
/**
647
* Clear the texture with the specified texel. Not guaranteed to be a
648
* renderable format. Data provided in the resource's format.
649
*/
650
void (*clear_texture)(struct pipe_context *pipe,
651
struct pipe_resource *res,
652
unsigned level,
653
const struct pipe_box *box,
654
const void *data);
655
656
/**
657
* Clear a buffer. Runs a memset over the specified region with the element
658
* value passed in through clear_value of size clear_value_size.
659
*/
660
void (*clear_buffer)(struct pipe_context *pipe,
661
struct pipe_resource *res,
662
unsigned offset,
663
unsigned size,
664
const void *clear_value,
665
int clear_value_size);
666
667
/**
668
* If a depth buffer is rendered with different sample location state than
669
* what is current at the time of reading, the values may differ because
670
* depth buffer compression can depend on the sample locations.
671
*
672
* This function is a hint to decompress the current depth buffer to avoid
673
* such problems.
674
*/
675
void (*evaluate_depth_buffer)(struct pipe_context *pipe);
676
677
/**
678
* Flush draw commands.
679
*
680
* This guarantees that the new fence (if any) will finish in finite time,
681
* unless PIPE_FLUSH_DEFERRED is used.
682
*
683
* Subsequent operations on other contexts of the same screen are guaranteed
684
* to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
685
*
686
* NOTE: use screen->fence_reference() (or equivalent) to transfer
687
* new fence ref to **fence, to ensure that previous fence is unref'd
688
*
689
* \param fence if not NULL, an old fence to unref and transfer a
690
* new fence reference to
691
* \param flags bitfield of enum pipe_flush_flags values.
692
*/
693
void (*flush)(struct pipe_context *pipe,
694
struct pipe_fence_handle **fence,
695
unsigned flags);
696
697
/**
698
* Create a fence from a fd.
699
*
700
* This is used for importing a foreign/external fence fd.
701
*
702
* \param fence if not NULL, an old fence to unref and transfer a
703
* new fence reference to
704
* \param fd fd representing the fence object
705
* \param type indicates which fence type backs fd
706
*/
707
void (*create_fence_fd)(struct pipe_context *pipe,
708
struct pipe_fence_handle **fence,
709
int fd,
710
enum pipe_fd_type type);
711
712
/**
713
* Insert commands to have GPU wait for fence to be signaled.
714
*/
715
void (*fence_server_sync)(struct pipe_context *pipe,
716
struct pipe_fence_handle *fence);
717
718
/**
719
* Insert commands to have the GPU signal a fence.
720
*/
721
void (*fence_server_signal)(struct pipe_context *pipe,
722
struct pipe_fence_handle *fence);
723
724
/**
725
* Create a view on a texture to be used by a shader stage.
726
*/
727
struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx,
728
struct pipe_resource *texture,
729
const struct pipe_sampler_view *templat);
730
731
/**
732
* Destroy a view on a texture.
733
*
734
* \param ctx the current context
735
* \param view the view to be destroyed
736
*
737
* \note The current context may not be the context in which the view was
738
* created (view->context). However, the caller must guarantee that
739
* the context which created the view is still alive.
740
*/
741
void (*sampler_view_destroy)(struct pipe_context *ctx,
742
struct pipe_sampler_view *view);
743
744
745
/**
746
* Get a surface which is a "view" into a resource, used by
747
* render target / depth stencil stages.
748
*/
749
struct pipe_surface *(*create_surface)(struct pipe_context *ctx,
750
struct pipe_resource *resource,
751
const struct pipe_surface *templat);
752
753
void (*surface_destroy)(struct pipe_context *ctx,
754
struct pipe_surface *);
755
756
757
/**
758
* Map a resource.
759
*
760
* Transfers are (by default) context-private and allow uploads to be
761
* interleaved with rendering.
762
*
763
* out_transfer will contain the transfer object that must be passed
764
* to all the other transfer functions. It also contains useful
765
* information (like texture strides for texture_map).
766
*/
767
void *(*buffer_map)(struct pipe_context *,
768
struct pipe_resource *resource,
769
unsigned level,
770
unsigned usage, /* a combination of PIPE_MAP_x */
771
const struct pipe_box *,
772
struct pipe_transfer **out_transfer);
773
774
/* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
775
* regions specified with this call are guaranteed to be written to
776
* the resource.
777
*/
778
void (*transfer_flush_region)( struct pipe_context *,
779
struct pipe_transfer *transfer,
780
const struct pipe_box *);
781
782
void (*buffer_unmap)(struct pipe_context *,
783
struct pipe_transfer *transfer);
784
785
void *(*texture_map)(struct pipe_context *,
786
struct pipe_resource *resource,
787
unsigned level,
788
unsigned usage, /* a combination of PIPE_MAP_x */
789
const struct pipe_box *,
790
struct pipe_transfer **out_transfer);
791
792
void (*texture_unmap)(struct pipe_context *,
793
struct pipe_transfer *transfer);
794
795
/* One-shot transfer operation with data supplied in a user
796
* pointer.
797
*/
798
void (*buffer_subdata)(struct pipe_context *,
799
struct pipe_resource *,
800
unsigned usage, /* a combination of PIPE_MAP_x */
801
unsigned offset,
802
unsigned size,
803
const void *data);
804
805
void (*texture_subdata)(struct pipe_context *,
806
struct pipe_resource *,
807
unsigned level,
808
unsigned usage, /* a combination of PIPE_MAP_x */
809
const struct pipe_box *,
810
const void *data,
811
unsigned stride,
812
unsigned layer_stride);
813
814
/**
815
* Flush any pending framebuffer writes and invalidate texture caches.
816
*/
817
void (*texture_barrier)(struct pipe_context *, unsigned flags);
818
819
/**
820
* Flush caches according to flags.
821
*/
822
void (*memory_barrier)(struct pipe_context *, unsigned flags);
823
824
/**
825
* Change the commitment status of a part of the given resource, which must
826
* have been created with the PIPE_RESOURCE_FLAG_SPARSE bit.
827
*
828
* \param level The texture level whose commitment should be changed.
829
* \param box The region of the resource whose commitment should be changed.
830
* \param commit Whether memory should be committed or un-committed.
831
*
832
* \return false if out of memory, true on success.
833
*/
834
bool (*resource_commit)(struct pipe_context *, struct pipe_resource *,
835
unsigned level, struct pipe_box *box, bool commit);
836
837
/**
838
* Creates a video codec for a specific video format/profile
839
*/
840
struct pipe_video_codec *(*create_video_codec)( struct pipe_context *context,
841
const struct pipe_video_codec *templat );
842
843
/**
844
* Creates a video buffer as decoding target
845
*/
846
struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context,
847
const struct pipe_video_buffer *templat );
848
849
/**
850
* Compute kernel execution
851
*/
852
/*@{*/
853
/**
854
* Define the compute program and parameters to be used by
855
* pipe_context::launch_grid.
856
*/
857
void *(*create_compute_state)(struct pipe_context *context,
858
const struct pipe_compute_state *);
859
void (*bind_compute_state)(struct pipe_context *, void *);
860
void (*delete_compute_state)(struct pipe_context *, void *);
861
862
/**
863
* Bind an array of shader resources that will be used by the
864
* compute program. Any resources that were previously bound to
865
* the specified range will be unbound after this call.
866
*
867
* \param start first resource to bind.
868
* \param count number of consecutive resources to bind.
869
* \param resources array of pointers to the resources to bind, it
870
* should contain at least \a count elements
871
* unless it's NULL, in which case no new
872
* resources will be bound.
873
*/
874
void (*set_compute_resources)(struct pipe_context *,
875
unsigned start, unsigned count,
876
struct pipe_surface **resources);
877
878
/**
 * Bind an array of buffers to be mapped into the address space of
 * the GLOBAL resource.  Any buffers that were previously bound
 * between [first, first + count - 1] are unbound after this call.
 *
 * \param first first buffer to map.
 * \param count number of consecutive buffers to map.
 * \param resources array of pointers to the buffers to map, it
 *                  should contain at least \a count elements
 *                  unless it's NULL, in which case no new
 *                  resources will be bound.
 * \param handles array of pointers to the memory locations that
 *                will be updated with the address each buffer
 *                will be mapped to.  The base memory address of
 *                each of the buffers will be added to the value
 *                pointed to by its corresponding handle to form
 *                the final address argument.  It should contain
 *                at least \a count elements, unless \a
 *                resources is NULL in which case \a handles
 *                should be NULL as well.
 *
 * Note that the driver isn't required to make any guarantees about
 * the contents of the \a handles array being valid anytime except
 * during the subsequent calls to pipe_context::launch_grid.  This
 * means that the only sensible location handles[i] may point to is
 * somewhere within the INPUT buffer itself.  This is so to
 * accommodate implementations that lack virtual memory but
 * nevertheless migrate buffers on the fly, leading to resource
 * base addresses that change on each kernel invocation or are
 * unknown to the pipe driver.
 */
void (*set_global_binding)(struct pipe_context *context,
                           unsigned first, unsigned count,
                           struct pipe_resource **resources,
                           uint32_t **handles);

/**
 * Launch the compute kernel starting from instruction \a pc of the
 * currently bound compute program.
 */
void (*launch_grid)(struct pipe_context *context,
                    const struct pipe_grid_info *info);
/*@}*/

/**
 * SVM (Shared Virtual Memory) helpers
 */
/*@{*/
/**
 * Migrate a range of virtual addresses to device or host memory.
 *
 * \param to_device - true if the virtual memory is migrated to the device,
 *                    false if the virtual memory is migrated to the host
 * \param migrate_content - whether the content should be migrated as well
 */
void (*svm_migrate)(struct pipe_context *context, unsigned num_ptrs,
                    const void* const* ptrs, const size_t *sizes,
                    bool to_device, bool migrate_content);
/*@}*/

/**
 * Get the default sample position for an individual sample point.
 *
 * \param sample_count - total number of samples
 * \param sample_index - sample to get the position values for
 * \param out_value - return value of 2 floats for x and y position for
 *                    requested sample.
 */
void (*get_sample_position)(struct pipe_context *context,
                            unsigned sample_count,
                            unsigned sample_index,
                            float *out_value);

/**
 * Query a timestamp in nanoseconds.  This is completely equivalent to
 * pipe_screen::get_timestamp() but takes a context handle for drivers
 * that require a context.
 */
uint64_t (*get_timestamp)(struct pipe_context *);

/**
 * Flush the resource cache, so that the resource can be used
 * by an external client.  Possible usage:
 * - flushing a resource before presenting it on the screen
 * - flushing a resource if some other process or device wants to use it
 * This shouldn't be used to flush caches if the resource is only managed
 * by a single pipe_screen and is not shared with another process.
 * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
 * use the resource for texturing)
 */
void (*flush_resource)(struct pipe_context *ctx,
                       struct pipe_resource *resource);

/**
 * Invalidate the contents of the resource.  This is used to
 *
 * (1) implement EGL's semantic of undefined depth/stencil
 * contents after a swapbuffers.  This allows a tiled renderer (for
 * example) to not store the depth buffer.
 *
 * (2) implement GL's InvalidateBufferData.  For backwards compatibility,
 * you must only rely on the usability for this purpose when
 * PIPE_CAP_INVALIDATE_BUFFER is enabled.
 */
void (*invalidate_resource)(struct pipe_context *ctx,
                            struct pipe_resource *resource);

/**
 * Return information about unexpected device resets.
 */
enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);

/**
 * Sets the reset status callback.  If the pointer is null, then no callback
 * is set, otherwise a copy of the data should be made.
 */
void (*set_device_reset_callback)(struct pipe_context *ctx,
                                  const struct pipe_device_reset_callback *cb);

/**
 * Dump driver-specific debug information into a stream.  This is
 * used by debugging tools.
 *
 * \param ctx        pipe context
 * \param stream     where the output should be written to
 * \param flags      a mask of PIPE_DUMP_* flags
 */
void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
                         unsigned flags);

/**
 * Set the log context to which the driver should write internal debug logs
 * (internal states, command streams).
 *
 * The caller must ensure that the log context is destroyed and reset to
 * NULL before the pipe context is destroyed, and that log context functions
 * are only called from the driver thread.
 *
 * \param ctx  pipe context
 * \param log  logging context
 */
void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);

/**
 * Emit a string marker in the cmdstream, for use by debugging tools.
 */
void (*emit_string_marker)(struct pipe_context *ctx,
                           const char *string,
                           int len);

/**
 * Generate mipmap.
 * \return TRUE if mipmap generation succeeds, FALSE otherwise
 */
bool (*generate_mipmap)(struct pipe_context *ctx,
                        struct pipe_resource *resource,
                        enum pipe_format format,
                        unsigned base_level,
                        unsigned last_level,
                        unsigned first_layer,
                        unsigned last_layer);

/**
 * Create a 64-bit texture handle.
 *
 * \param ctx    pipe context
 * \param view   pipe sampler view object
 * \param state  pipe sampler state template
 * \return a 64-bit texture handle if success, 0 otherwise
 */
uint64_t (*create_texture_handle)(struct pipe_context *ctx,
                                  struct pipe_sampler_view *view,
                                  const struct pipe_sampler_state *state);

/**
 * Delete a texture handle.
 *
 * \param ctx     pipe context
 * \param handle  64-bit texture handle
 */
void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);

/**
 * Make a texture handle resident.
 *
 * \param ctx       pipe context
 * \param handle    64-bit texture handle
 * \param resident  TRUE for resident, FALSE otherwise
 */
void (*make_texture_handle_resident)(struct pipe_context *ctx,
                                     uint64_t handle, bool resident);

/**
 * Create a 64-bit image handle.
 *
 * \param ctx    pipe context
 * \param image  pipe image view template
 * \return a 64-bit image handle if success, 0 otherwise
 */
uint64_t (*create_image_handle)(struct pipe_context *ctx,
                                const struct pipe_image_view *image);

/**
 * Delete an image handle.
 *
 * \param ctx     pipe context
 * \param handle  64-bit image handle
 */
void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);

/**
 * Make an image handle resident.
 *
 * \param ctx       pipe context
 * \param handle    64-bit image handle
 * \param access    GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
 * \param resident  TRUE for resident, FALSE otherwise
 */
void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle,
                                   unsigned access, bool resident);

/**
 * Call the given function from the driver thread.
 *
 * This is set by threaded contexts for use by debugging wrappers.
 *
 * \param asap if true, run the callback immediately if there are no pending
 *             commands to be processed by the driver thread
 */
void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
                 bool asap);

/**
 * Set a context parameter.  See enum pipe_context_param for more details.
 */
void (*set_context_param)(struct pipe_context *ctx,
                          enum pipe_context_param param,
                          unsigned value);

/**
 * Creates a video buffer as decoding target, with modifiers.
 */
struct pipe_video_buffer *(*create_video_buffer_with_modifiers)(struct pipe_context *context,
                                                                const struct pipe_video_buffer *templat,
                                                                const uint64_t *modifiers,
                                                                unsigned int modifiers_count);
};


#ifdef __cplusplus
}
#endif

#endif /* PIPE_CONTEXT_H */