Path: blob/21.2-virgl/src/gallium/drivers/freedreno/freedreno_context.h
4570 views
/*1* Copyright (C) 2012 Rob Clark <[email protected]>2*3* Permission is hereby granted, free of charge, to any person obtaining a4* copy of this software and associated documentation files (the "Software"),5* to deal in the Software without restriction, including without limitation6* the rights to use, copy, modify, merge, publish, distribute, sublicense,7* and/or sell copies of the Software, and to permit persons to whom the8* Software is furnished to do so, subject to the following conditions:9*10* The above copyright notice and this permission notice (including the next11* paragraph) shall be included in all copies or substantial portions of the12* Software.13*14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL17* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER18* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,19* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE20* SOFTWARE.21*22* Authors:23* Rob Clark <[email protected]>24*/2526#ifndef FREEDRENO_CONTEXT_H_27#define FREEDRENO_CONTEXT_H_2829#include "indices/u_primconvert.h"30#include "pipe/p_context.h"31#include "util/libsync.h"32#include "util/list.h"33#include "util/slab.h"34#include "util/u_blitter.h"35#include "util/u_string.h"36#include "util/u_threaded_context.h"37#include "util/u_trace.h"3839#include "freedreno_autotune.h"40#include "freedreno_gmem.h"41#include "freedreno_perfetto.h"42#include "freedreno_screen.h"43#include "freedreno_util.h"4445#ifdef __cplusplus46extern "C" {47#endif4849#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)5051struct fd_vertex_stateobj;52struct fd_batch;5354struct fd_texture_stateobj {55struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];56unsigned num_textures;57unsigned valid_textures;58struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];59unsigned num_samplers;60unsigned valid_samplers;61};6263struct fd_program_stateobj {64void *vs, *hs, *ds, *gs, *fs;65};6667struct fd_constbuf_stateobj {68struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];69uint32_t enabled_mask;70};7172struct fd_shaderbuf_stateobj {73struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];74uint32_t enabled_mask;75uint32_t writable_mask;76};7778struct fd_shaderimg_stateobj {79struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];80uint32_t enabled_mask;81};8283struct fd_vertexbuf_stateobj {84struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];85unsigned count;86uint32_t enabled_mask;87};8889struct fd_vertex_stateobj {90struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];91unsigned num_elements;92};9394struct fd_stream_output_target {95struct pipe_stream_output_target base;96struct pipe_resource *offset_buf;97/* stride of the last stream out recorded to this target, for98* glDrawTransformFeedback(). */99uint32_t stride;100};101102struct fd_streamout_stateobj {103struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];104/* Bitmask of stream that should be reset. */105unsigned reset;106107unsigned num_targets;108/* Track offset from vtxcnt for streamout data. This counter109* is just incremented by # of vertices on each draw until110* reset or new streamout buffer bound.111*112* When we eventually have GS, the CPU won't actually know the113* number of vertices per draw, so I think we'll have to do114* something more clever.115*/116unsigned offsets[PIPE_MAX_SO_BUFFERS];117118/* Pre-a6xx, the maximum number of vertices that could be recorded to this119* set of targets with the current vertex shader. a6xx and newer, hardware120* queries are used.121*/122unsigned max_tf_vtx;123124/* Pre-a6xx, the number of verts written to the buffers since the last125* Begin. Used for overflow checking for SW queries.126*/127unsigned verts_written;128};129130#define MAX_GLOBAL_BUFFERS 16131struct fd_global_bindings_stateobj {132struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];133uint32_t enabled_mask;134};135136/* group together the vertex and vertexbuf state.. for ease of passing137* around, and because various internal operations (gmem<->mem, etc)138* need their own vertex state:139*/140struct fd_vertex_state {141struct fd_vertex_stateobj *vtx;142struct fd_vertexbuf_stateobj vertexbuf;143};144145/* global 3d pipeline dirty state: */146enum fd_dirty_3d_state {147FD_DIRTY_BLEND = BIT(0),148FD_DIRTY_RASTERIZER = BIT(1),149FD_DIRTY_ZSA = BIT(2),150FD_DIRTY_BLEND_COLOR = BIT(3),151FD_DIRTY_STENCIL_REF = BIT(4),152FD_DIRTY_SAMPLE_MASK = BIT(5),153FD_DIRTY_FRAMEBUFFER = BIT(6),154FD_DIRTY_STIPPLE = BIT(7),155FD_DIRTY_VIEWPORT = BIT(8),156FD_DIRTY_VTXSTATE = BIT(9),157FD_DIRTY_VTXBUF = BIT(10),158FD_DIRTY_MIN_SAMPLES = BIT(11),159FD_DIRTY_SCISSOR = BIT(12),160FD_DIRTY_STREAMOUT = BIT(13),161FD_DIRTY_UCP = BIT(14),162FD_DIRTY_PROG = BIT(15),163FD_DIRTY_CONST = BIT(16),164FD_DIRTY_TEX = BIT(17),165FD_DIRTY_IMAGE = BIT(18),166FD_DIRTY_SSBO = BIT(19),167168/* only used by a2xx.. possibly can be removed.. */169FD_DIRTY_TEXSTATE = BIT(20),170171/* fine grained state changes, for cases where state is not orthogonal172* from hw perspective:173*/174FD_DIRTY_RASTERIZER_DISCARD = BIT(24),175FD_DIRTY_BLEND_DUAL = BIT(25),176#define NUM_DIRTY_BITS 26177178/* additional flag for state requires updated resource tracking: */179FD_DIRTY_RESOURCE = BIT(31),180};181182/* per shader-stage dirty state: */183enum fd_dirty_shader_state {184FD_DIRTY_SHADER_PROG = BIT(0),185FD_DIRTY_SHADER_CONST = BIT(1),186FD_DIRTY_SHADER_TEX = BIT(2),187FD_DIRTY_SHADER_SSBO = BIT(3),188FD_DIRTY_SHADER_IMAGE = BIT(4),189#define NUM_DIRTY_SHADER_BITS 5190};191192#define MAX_HW_SAMPLE_PROVIDERS 7193struct fd_hw_sample_provider;194struct fd_hw_sample;195196struct ir3_shader_key;197198struct fd_context {199struct pipe_context base;200201struct threaded_context *tc;202203struct list_head node; /* node in screen->context_list */204205/* We currently need to serialize emitting GMEM batches, because of206* VSC state access in the context.207*208* In practice this lock should not be contended, since pipe_context209* use should be single threaded. But it is needed to protect the210* case, with batch reordering where a ctxB batch triggers flushing211* a ctxA batch212*/213simple_mtx_t gmem_lock;214215struct fd_device *dev;216struct fd_screen *screen;217struct fd_pipe *pipe;218219struct blitter_context *blitter dt;220void *clear_rs_state[2] dt;221struct primconvert_context *primconvert dt;222223/* slab for pipe_transfer allocations: */224struct slab_child_pool transfer_pool dt;225struct slab_child_pool transfer_pool_unsync; /* for threaded_context */226227struct fd_autotune autotune dt;228229/**230* query related state:231*/232/*@{*/233/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */234struct slab_mempool sample_pool dt;235struct slab_mempool sample_period_pool dt;236237/* sample-providers for hw queries: */238const struct fd_hw_sample_provider239*hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];240241/* list of active queries: */242struct list_head hw_active_queries dt;243244/* sample-providers for accumulating hw queries: */245const struct fd_acc_sample_provider246*acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];247248/* list of active accumulating queries: */249struct list_head acc_active_queries dt;250/*@}*/251252/* Whether we need to recheck the active_queries list next253* fd_batch_update_queries().254*/255bool update_active_queries dt;256257/* Current state of pctx->set_active_query_state() (i.e. "should drawing258* be counted against non-perfcounter queries")259*/260bool active_queries dt;261262/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to263* DI_PT_x value to use for draw initiator. There are some264* slight differences between generation:265*/266const uint8_t *primtypes;267uint32_t primtype_mask;268269/* shaders used by clear, and gmem->mem blits: */270struct fd_program_stateobj solid_prog; // TODO move to screen?271struct fd_program_stateobj solid_layered_prog;272273/* shaders used by mem->gmem blits: */274struct fd_program_stateobj275blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?276struct fd_program_stateobj blit_z, blit_zs;277278/* Stats/counters:279*/280struct {281uint64_t prims_emitted;282uint64_t prims_generated;283uint64_t draw_calls;284uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw,285batch_restore;286uint64_t staging_uploads, shadow_uploads;287uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;288} stats dt;289290/* Counter for number of users who need sw counters (so we can291* skip collecting them when not needed)292*/293unsigned stats_users;294295/* Current batch.. the rule here is that you can deref ctx->batch296* in codepaths from pipe_context entrypoints. But not in code-297* paths from fd_batch_flush() (basically, the stuff that gets298* called from GMEM code), since in those code-paths the batch299* you care about is not necessarily the same as ctx->batch.300*/301struct fd_batch *batch dt;302303/* NULL if there has been rendering since last flush. Otherwise304* keeps a reference to the last fence so we can re-use it rather305* than having to flush no-op batch.306*/307struct pipe_fence_handle *last_fence dt;308309/* Fence fd we are told to wait on via ->fence_server_sync() (or -1310* if none). The in-fence is transferred over to the batch on the311* next draw/blit/grid.312*313* The reason for this extra complexity is that apps will typically314* do eglWaitSyncKHR()/etc at the beginning of the frame, before the315* first draw. But mesa/st doesn't flush down framebuffer state316* change until we hit a draw, so at ->fence_server_sync() time, we317* don't yet have the correct batch. If we created a batch at that318* point, it would be the wrong one, and we'd have to flush it pre-319* maturely, causing us to stall early in the frame where we could320* be building up cmdstream.321*/322int in_fence_fd dt;323324/* track last known reset status globally and per-context to325* determine if more resets occurred since then. If global reset326* count increases, it means some other context crashed. If327* per-context reset count increases, it means we crashed the328* gpu.329*/330uint32_t context_reset_count dt;331uint32_t global_reset_count dt;332333/* Context sequence #, used for batch-cache key: */334uint16_t seqno;335336/* Cost per draw, used in conjunction with samples-passed history to337* estimate whether GMEM or bypass is the better option.338*/339uint8_t draw_cost;340341/* Are we in process of shadowing a resource? Used to detect recursion342* in transfer_map, and skip unneeded synchronization.343*/344bool in_shadow : 1 dt;345346/* For catching recursion problems with blit fallback: */347bool in_blit : 1 dt;348349/* points to either scissor or disabled_scissor depending on rast state: */350struct pipe_scissor_state *current_scissor dt;351352struct pipe_scissor_state scissor dt;353354/* we don't have a disable/enable bit for scissor, so instead we keep355* a disabled-scissor state which matches the entire bound framebuffer356* and use that when scissor is not enabled.357*/358struct pipe_scissor_state disabled_scissor dt;359360/* Per vsc pipe bo's (a2xx-a5xx): */361struct fd_bo *vsc_pipe_bo[32] dt;362363/* Maps generic gallium oriented fd_dirty_3d_state bits to generation364* specific bitmask of state "groups".365*/366uint32_t gen_dirty_map[NUM_DIRTY_BITS];367uint32_t gen_dirty_shader_map[PIPE_SHADER_TYPES][NUM_DIRTY_SHADER_BITS];368369/* Bitmask of all possible gen_dirty bits: */370uint32_t gen_all_dirty;371372/* Generation specific bitmask of dirty state groups: */373uint32_t gen_dirty;374375/* which state objects need to be re-emit'd: */376enum fd_dirty_3d_state dirty dt;377378/* per shader-stage dirty status: */379enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES] dt;380381void *compute dt;382struct pipe_blend_state *blend dt;383struct pipe_rasterizer_state *rasterizer dt;384struct pipe_depth_stencil_alpha_state *zsa dt;385386struct fd_texture_stateobj tex[PIPE_SHADER_TYPES] dt;387388struct fd_program_stateobj prog dt;389uint32_t bound_shader_stages dt;390391struct fd_vertex_state vtx dt;392393struct pipe_blend_color blend_color dt;394struct pipe_stencil_ref stencil_ref dt;395unsigned sample_mask dt;396unsigned min_samples dt;397/* local context fb state, for when ctx->batch is null: */398struct pipe_framebuffer_state framebuffer dt;399struct pipe_poly_stipple stipple dt;400struct pipe_viewport_state viewport dt;401struct pipe_scissor_state viewport_scissor dt;402struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES] dt;403struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES] dt;404struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES] dt;405struct fd_streamout_stateobj streamout dt;406struct fd_global_bindings_stateobj global_bindings dt;407struct pipe_clip_state ucp dt;408409struct pipe_query *cond_query dt;410bool cond_cond dt; /* inverted rendering condition */411uint cond_mode dt;412413/* Private memory is a memory space where each fiber gets its own piece of414* memory, in addition to registers. It is backed by a buffer which needs415* to be large enough to hold the contents of every possible wavefront in416* every core of the GPU. Because it allocates space via the internal417* wavefront ID which is shared between all currently executing shaders,418* the same buffer can be reused by all shaders, as long as all shaders419* sharing the same buffer use the exact same configuration. There are two420* inputs to the configuration, the amount of per-fiber space and whether421* to use the newer per-wave or older per-fiber layout. We only ever422* increase the size, and shaders with a smaller size requirement simply423* use the larger existing buffer, so that we only need to keep track of424* one buffer and its size, but we still need to keep track of per-fiber425* and per-wave buffers separately so that we never use the same buffer426* for different layouts. pvtmem[0] is for per-fiber, and pvtmem[1] is for427* per-wave.428*/429struct {430struct fd_bo *bo;431uint32_t per_fiber_size;432} pvtmem[2] dt;433434/* maps per-shader-stage state plus variant key to hw435* program stateobj:436*/437struct ir3_cache *shader_cache;438439struct pipe_debug_callback debug;440441struct u_trace_context trace_context dt;442443#ifdef HAVE_PERFETTO444struct fd_perfetto_state perfetto;445#endif446447/*448* Counter to generate submit-ids449*/450uint32_t submit_count;451452/* Called on rebind_resource() for any per-gen cleanup required: */453void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc) dt;454455/* GMEM/tile handling fxns: */456void (*emit_tile_init)(struct fd_batch *batch) dt;457void (*emit_tile_prep)(struct fd_batch *batch,458const struct fd_tile *tile) dt;459void (*emit_tile_mem2gmem)(struct fd_batch *batch,460const struct fd_tile *tile) dt;461void (*emit_tile_renderprep)(struct fd_batch *batch,462const struct fd_tile *tile) dt;463void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile) dt;464void (*emit_tile_gmem2mem)(struct fd_batch *batch,465const struct fd_tile *tile) dt;466void (*emit_tile_fini)(struct fd_batch *batch) dt; /* optional */467468/* optional, for GMEM bypass: */469void (*emit_sysmem_prep)(struct fd_batch *batch) dt;470void (*emit_sysmem_fini)(struct fd_batch *batch) dt;471472/* draw: */473bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,474unsigned drawid_offset,475const struct pipe_draw_indirect_info *indirect,476const struct pipe_draw_start_count_bias *draw,477unsigned index_offset) dt;478bool (*clear)(struct fd_context *ctx, unsigned buffers,479const union pipe_color_union *color, double depth,480unsigned stencil) dt;481482/* compute: */483void (*launch_grid)(struct fd_context *ctx,484const struct pipe_grid_info *info) dt;485486/* query: */487struct fd_query *(*create_query)(struct fd_context *ctx, unsigned query_type,488unsigned index);489void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles) dt;490void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,491struct fd_ringbuffer *ring) dt;492void (*query_update_batch)(struct fd_batch *batch, bool disable_all) dt;493494/* blitter: */495bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info) dt;496void (*clear_ubwc)(struct fd_batch *batch, struct fd_resource *rsc) dt;497498/* uncompress resource, if necessary, to use as the specified format: */499void (*validate_format)(struct fd_context *ctx, struct fd_resource *rsc,500enum pipe_format format) dt;501502/* handling for barriers: */503void (*framebuffer_barrier)(struct fd_context *ctx) dt;504505/* logger: */506void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo,507unsigned offset);508uint64_t (*ts_to_ns)(uint64_t ts);509510/*511* Common pre-cooked VBO state (used for a3xx and later):512*/513514/* for clear/gmem->mem vertices, and mem->gmem */515struct pipe_resource *solid_vbuf;516517/* for mem->gmem tex coords: */518struct pipe_resource *blit_texcoord_vbuf;519520/* vertex state for solid_vbuf:521* - solid_vbuf / 12 / R32G32B32_FLOAT522*/523struct fd_vertex_state solid_vbuf_state;524525/* vertex state for blit_prog:526* - blit_texcoord_vbuf / 8 / R32G32_FLOAT527* - solid_vbuf / 12 / R32G32B32_FLOAT528*/529struct fd_vertex_state blit_vbuf_state;530531/*532* Info about state of previous draw, for state that comes from533* pipe_draw_info (ie. not part of a CSO). This allows us to534* skip some register emit when the state doesn't change from535* draw-to-draw536*/537struct {538bool dirty; /* last draw state unknown */539bool primitive_restart;540uint32_t index_start;541uint32_t instance_start;542uint32_t restart_index;543uint32_t streamout_mask;544545/* some state changes require a different shader variant. Keep546* track of this so we know when we need to re-emit shader state547* due to variant change. See ir3_fixup_shader_state()548*549* (used for a3xx+, NULL otherwise)550*/551struct ir3_shader_key *key;552553} last dt;554};555556static inline struct fd_context *557fd_context(struct pipe_context *pctx)558{559return (struct fd_context *)pctx;560}561562static inline struct fd_stream_output_target *563fd_stream_output_target(struct pipe_stream_output_target *target)564{565return (struct fd_stream_output_target *)target;566}567568/**569* Does the dirty state require resource tracking, ie. in general570* does it reference some resource. There are some special cases:571*572* - FD_DIRTY_CONST can reference a resource, but cb0 is handled573* specially as if it is not a user-buffer, we expect it to be574* coming from const_uploader, so we can make some assumptions575* that future transfer_map will be UNSYNCRONIZED576* - FD_DIRTY_ZSA controls how the framebuffer is accessed577* - FD_DIRTY_BLEND needs to update GMEM reason578*579* TODO if we can make assumptions that framebuffer state is bound580* first, before blend/zsa/etc state we can move some of the ZSA/581* BLEND state handling from draw time to bind time. I think this582* is true of mesa/st, perhaps we can just document it to be a583* frontend requirement?584*/585static inline bool586fd_context_dirty_resource(enum fd_dirty_3d_state dirty)587{588return dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA | FD_DIRTY_BLEND |589FD_DIRTY_SSBO | FD_DIRTY_IMAGE | FD_DIRTY_VTXBUF |590FD_DIRTY_TEX | FD_DIRTY_STREAMOUT);591}592593#ifdef __cplusplus594#define or_dirty(d, mask) \595do { \596decltype(mask) _d = (d); \597d = (decltype(mask))(_d | (mask)); \598} while (0)599#else600#define or_dirty(d, mask) \601do { \602d |= (mask); \603} while (0)604#endif605606/* Mark specified non-shader-stage related state as dirty: */607static inline void608fd_context_dirty(struct fd_context *ctx, enum fd_dirty_3d_state dirty) assert_dt609{610assert(util_is_power_of_two_nonzero(dirty));611STATIC_ASSERT(ffs(dirty) <= ARRAY_SIZE(ctx->gen_dirty_map));612613ctx->gen_dirty |= ctx->gen_dirty_map[ffs(dirty) - 1];614615if (fd_context_dirty_resource(dirty))616or_dirty(dirty, FD_DIRTY_RESOURCE);617618or_dirty(ctx->dirty, dirty);619}620621static inline void622fd_context_dirty_shader(struct fd_context *ctx, enum pipe_shader_type shader,623enum fd_dirty_shader_state dirty) assert_dt624{625const enum fd_dirty_3d_state map[] = {626FD_DIRTY_PROG, FD_DIRTY_CONST, FD_DIRTY_TEX,627FD_DIRTY_SSBO, FD_DIRTY_IMAGE,628};629630/* Need to update the table above if these shift: */631STATIC_ASSERT(FD_DIRTY_SHADER_PROG == BIT(0));632STATIC_ASSERT(FD_DIRTY_SHADER_CONST == BIT(1));633STATIC_ASSERT(FD_DIRTY_SHADER_TEX == BIT(2));634STATIC_ASSERT(FD_DIRTY_SHADER_SSBO == BIT(3));635STATIC_ASSERT(FD_DIRTY_SHADER_IMAGE == BIT(4));636637assert(util_is_power_of_two_nonzero(dirty));638assert(ffs(dirty) <= ARRAY_SIZE(map));639640ctx->gen_dirty |= ctx->gen_dirty_shader_map[shader][ffs(dirty) - 1];641642or_dirty(ctx->dirty_shader[shader], dirty);643fd_context_dirty(ctx, map[ffs(dirty) - 1]);644}645646/* mark all state dirty: */647static inline void648fd_context_all_dirty(struct fd_context *ctx) assert_dt649{650ctx->last.dirty = true;651ctx->dirty = (enum fd_dirty_3d_state) ~0;652653/* NOTE: don't use ~0 for gen_dirty, because the gen specific654* emit code will loop over all the bits:655*/656ctx->gen_dirty = ctx->gen_all_dirty;657658for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)659ctx->dirty_shader[i] = (enum fd_dirty_shader_state) ~0;660}661662static inline void663fd_context_all_clean(struct fd_context *ctx) assert_dt664{665ctx->last.dirty = false;666ctx->dirty = (enum fd_dirty_3d_state)0;667ctx->gen_dirty = 0;668for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {669/* don't mark compute state as clean, since it is not emitted670* during normal draw call. The places that call _all_dirty(),671* it is safe to mark compute state dirty as well, but the672* inverse is not true.673*/674if (i == PIPE_SHADER_COMPUTE)675continue;676ctx->dirty_shader[i] = (enum fd_dirty_shader_state)0;677}678}679680/**681* Add mapping between global dirty bit and generation specific dirty682* bit.683*/684static inline void685fd_context_add_map(struct fd_context *ctx, enum fd_dirty_3d_state dirty,686uint32_t gen_dirty)687{688u_foreach_bit (b, dirty) {689ctx->gen_dirty_map[b] |= gen_dirty;690}691ctx->gen_all_dirty |= gen_dirty;692}693694/**695* Add mapping between shader stage specific dirty bit and generation696* specific dirty bit697*/698static inline void699fd_context_add_shader_map(struct fd_context *ctx, enum pipe_shader_type shader,700enum fd_dirty_shader_state dirty, uint32_t gen_dirty)701{702u_foreach_bit (b, dirty) {703ctx->gen_dirty_shader_map[shader][b] |= gen_dirty;704}705ctx->gen_all_dirty |= gen_dirty;706}707708static inline struct pipe_scissor_state *709fd_context_get_scissor(struct fd_context *ctx) assert_dt710{711return ctx->current_scissor;712}713714static inline bool715fd_supported_prim(struct fd_context *ctx, unsigned prim)716{717return (1 << prim) & ctx->primtype_mask;718}719720void fd_context_switch_from(struct fd_context *ctx) assert_dt;721void fd_context_switch_to(struct fd_context *ctx,722struct fd_batch *batch) assert_dt;723struct fd_batch *fd_context_batch(struct fd_context *ctx) assert_dt;724struct fd_batch *fd_context_batch_locked(struct fd_context *ctx) assert_dt;725726void fd_context_setup_common_vbos(struct fd_context *ctx);727void fd_context_cleanup_common_vbos(struct fd_context *ctx);728void fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len);729void fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len);730731struct pipe_context *fd_context_init(struct fd_context *ctx,732struct pipe_screen *pscreen,733const uint8_t *primtypes, void *priv,734unsigned flags);735struct pipe_context *fd_context_init_tc(struct pipe_context *pctx,736unsigned flags);737738void fd_context_destroy(struct pipe_context *pctx) assert_dt;739740#ifdef __cplusplus741}742#endif743744#endif /* FREEDRENO_CONTEXT_H_ */745746747