/* Provenance: Mesa (branch 21.2-virgl), src/gallium/drivers/r600/r600_pipe.h,
 * as mirrored by PojavLauncherTeam/mesa. Web-viewer chrome removed. */
1
/*
2
* Copyright 2010 Jerome Glisse <[email protected]>
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* on the rights to use, copy, modify, merge, publish, distribute, sub
8
* license, and/or sell copies of the Software, and to permit persons to whom
9
* the Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21
* USE OR OTHER DEALINGS IN THE SOFTWARE.
22
*
23
* Authors:
24
* Jerome Glisse
25
*/
26
#ifndef R600_PIPE_H
27
#define R600_PIPE_H
28
29
#include "r600_pipe_common.h"
30
#include "r600_cs.h"
31
#include "r600_public.h"
32
#include "pipe/p_defines.h"
33
34
#include "util/u_suballoc.h"
35
#include "util/list.h"
36
#include "util/u_transfer.h"
37
#include "util/u_memory.h"
38
39
#include "tgsi/tgsi_scan.h"
40
41
/* Number of state-atom binding slots in r600_context::atoms[]. */
#define R600_NUM_ATOMS 56

/* Maximum image views per r600_image_state (see r600_image_state::views). */
#define R600_MAX_IMAGES 8
/*
 * ranges reserved for images on evergreen
 * first set for the immediate buffers,
 * second for the actual resources for RESQ.
 */
#define R600_IMAGE_IMMED_RESOURCE_OFFSET 160
#define R600_IMAGE_REAL_RESOURCE_OFFSET 168
51
52
/* read caches */
53
#define R600_CONTEXT_INV_VERTEX_CACHE (R600_CONTEXT_PRIVATE_FLAG << 0)
54
#define R600_CONTEXT_INV_TEX_CACHE (R600_CONTEXT_PRIVATE_FLAG << 1)
55
#define R600_CONTEXT_INV_CONST_CACHE (R600_CONTEXT_PRIVATE_FLAG << 2)
56
/* read-write caches */
57
#define R600_CONTEXT_FLUSH_AND_INV (R600_CONTEXT_PRIVATE_FLAG << 3)
58
#define R600_CONTEXT_FLUSH_AND_INV_CB_META (R600_CONTEXT_PRIVATE_FLAG << 4)
59
#define R600_CONTEXT_FLUSH_AND_INV_DB_META (R600_CONTEXT_PRIVATE_FLAG << 5)
60
#define R600_CONTEXT_FLUSH_AND_INV_DB (R600_CONTEXT_PRIVATE_FLAG << 6)
61
#define R600_CONTEXT_FLUSH_AND_INV_CB (R600_CONTEXT_PRIVATE_FLAG << 7)
62
/* engine synchronization */
63
#define R600_CONTEXT_PS_PARTIAL_FLUSH (R600_CONTEXT_PRIVATE_FLAG << 8)
64
#define R600_CONTEXT_WAIT_3D_IDLE (R600_CONTEXT_PRIVATE_FLAG << 9)
65
#define R600_CONTEXT_WAIT_CP_DMA_IDLE (R600_CONTEXT_PRIVATE_FLAG << 10)
66
#define R600_CONTEXT_CS_PARTIAL_FLUSH (R600_CONTEXT_PRIVATE_FLAG << 11)
67
68
/* the number of CS dwords for flushing and drawing */
69
#define R600_MAX_FLUSH_CS_DWORDS 18
70
#define R600_MAX_DRAW_CS_DWORDS 58
71
#define R600_MAX_PFP_SYNC_ME_DWORDS 16
72
73
#define EG_MAX_ATOMIC_BUFFERS 8
74
75
#define R600_MAX_USER_CONST_BUFFERS 15
76
#define R600_MAX_DRIVER_CONST_BUFFERS 3
77
#define R600_MAX_CONST_BUFFERS (R600_MAX_USER_CONST_BUFFERS + R600_MAX_DRIVER_CONST_BUFFERS)
78
#define R600_MAX_HW_CONST_BUFFERS 16
79
80
/* start driver buffers after user buffers */
81
#define R600_BUFFER_INFO_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS)
82
#define R600_UCP_SIZE (4*4*8)
83
#define R600_CS_BLOCK_GRID_SIZE (8 * 4)
84
#define R600_TCS_DEFAULT_LEVELS_SIZE (6 * 4)
85
#define R600_BUFFER_INFO_OFFSET (R600_UCP_SIZE)
86
87
/*
88
* We only access this buffer through vtx clauses hence it's fine to exist
89
* at index beyond 15.
90
*/
91
#define R600_LDS_INFO_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 1)
92
/*
93
* Note GS doesn't use a constant buffer binding, just a resource index,
94
* so it's fine to have it exist at index beyond 15. I.e. it's not actually
95
* a const buffer, just a buffer resource.
96
*/
97
#define R600_GS_RING_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 2)
98
/* Currently R600_MAX_CONST_BUFFERS just fits on the hw, which has a limit
99
* of 16 const buffers.
100
* UCP/SAMPLE_POSITIONS are never accessed by same shader stage so they can use the same id.
101
*
102
* In order to support d3d 11 mandated minimum of 15 user const buffers
103
* we'd have to squash all use cases into one driver buffer.
104
*/
105
#define R600_MAX_CONST_BUFFER_SIZE (4096 * sizeof(float[4]))
106
107
/* HW stages */
108
#define R600_HW_STAGE_PS 0
109
#define R600_HW_STAGE_VS 1
110
#define R600_HW_STAGE_GS 2
111
#define R600_HW_STAGE_ES 3
112
#define EG_HW_STAGE_LS 4
113
#define EG_HW_STAGE_HS 5
114
115
#define R600_NUM_HW_STAGES 4
116
#define EG_NUM_HW_STAGES 6
117
118
struct r600_context;
119
struct r600_bytecode;
120
union r600_shader_key;
121
122
/* This is an atom containing GPU commands that never change.
 * This is supposed to be copied directly into the CS. */
struct r600_command_buffer {
	uint32_t *buf;       /* command dwords */
	unsigned num_dw;     /* number of dwords currently stored in buf */
	unsigned max_num_dw; /* capacity of buf, in dwords */
	unsigned pkt_flags;  /* OR'd into PKT3 headers by the r600_store_*_seq
	                      * helpers (e.g. RADEON_CP_PACKET3_COMPUTE_MODE) */
};
130
131
struct r600_db_state {
132
struct r600_atom atom;
133
struct r600_surface *rsurf;
134
};
135
136
struct r600_db_misc_state {
137
struct r600_atom atom;
138
bool occlusion_queries_disabled;
139
bool flush_depthstencil_through_cb;
140
bool flush_depth_inplace;
141
bool flush_stencil_inplace;
142
bool copy_depth, copy_stencil;
143
unsigned copy_sample;
144
unsigned log_samples;
145
unsigned db_shader_control;
146
bool htile_clear;
147
uint8_t ps_conservative_z;
148
};
149
150
struct r600_cb_misc_state {
151
struct r600_atom atom;
152
unsigned cb_color_control; /* this comes from blend state */
153
unsigned blend_colormask; /* 8*4 bits for 8 RGBA colorbuffers */
154
unsigned nr_cbufs;
155
unsigned bound_cbufs_target_mask;
156
unsigned nr_ps_color_outputs;
157
unsigned ps_color_export_mask;
158
unsigned image_rat_enabled_mask;
159
unsigned buffer_rat_enabled_mask;
160
bool multiwrite;
161
bool dual_src_blend;
162
};
163
164
struct r600_clip_misc_state {
165
struct r600_atom atom;
166
unsigned pa_cl_clip_cntl; /* from rasterizer */
167
unsigned pa_cl_vs_out_cntl; /* from vertex shader */
168
unsigned clip_plane_enable; /* from rasterizer */
169
unsigned cc_dist_mask; /* from vertex shader */
170
unsigned clip_dist_write; /* from vertex shader */
171
unsigned cull_dist_write; /* from vertex shader */
172
boolean clip_disable; /* from vertex shader */
173
boolean vs_out_viewport; /* from vertex shader */
174
};
175
176
struct r600_alphatest_state {
177
struct r600_atom atom;
178
unsigned sx_alpha_test_control; /* this comes from dsa state */
179
unsigned sx_alpha_ref; /* this comes from dsa state */
180
bool bypass;
181
bool cb0_export_16bpc; /* from set_framebuffer_state */
182
};
183
184
struct r600_vgt_state {
185
struct r600_atom atom;
186
uint32_t vgt_multi_prim_ib_reset_en;
187
uint32_t vgt_multi_prim_ib_reset_indx;
188
uint32_t vgt_indx_offset;
189
bool last_draw_was_indirect;
190
};
191
192
struct r600_blend_color {
193
struct r600_atom atom;
194
struct pipe_blend_color state;
195
};
196
197
struct r600_clip_state {
198
struct r600_atom atom;
199
struct pipe_clip_state state;
200
};
201
202
struct r600_cs_shader_state {
203
struct r600_atom atom;
204
unsigned kernel_index;
205
unsigned pc;
206
struct r600_pipe_compute *shader;
207
};
208
209
struct r600_framebuffer {
210
struct r600_atom atom;
211
struct pipe_framebuffer_state state;
212
unsigned compressed_cb_mask;
213
unsigned nr_samples;
214
bool export_16bpc;
215
bool cb0_is_integer;
216
bool is_msaa_resolve;
217
bool dual_src_blend;
218
bool do_update_surf_dirtiness;
219
};
220
221
struct r600_sample_mask {
222
struct r600_atom atom;
223
uint16_t sample_mask; /* there are only 8 bits on EG, 16 bits on Cayman */
224
};
225
226
struct r600_config_state {
227
struct r600_atom atom;
228
unsigned sq_gpr_resource_mgmt_1;
229
unsigned sq_gpr_resource_mgmt_2;
230
unsigned sq_gpr_resource_mgmt_3;
231
bool dyn_gpr_enabled;
232
};
233
234
struct r600_stencil_ref
235
{
236
ubyte ref_value[2];
237
ubyte valuemask[2];
238
ubyte writemask[2];
239
};
240
241
struct r600_stencil_ref_state {
242
struct r600_atom atom;
243
struct r600_stencil_ref state;
244
struct pipe_stencil_ref pipe_state;
245
};
246
247
struct r600_shader_stages_state {
248
struct r600_atom atom;
249
unsigned geom_enable;
250
};
251
252
struct r600_gs_rings_state {
253
struct r600_atom atom;
254
unsigned enable;
255
struct pipe_constant_buffer esgs_ring;
256
struct pipe_constant_buffer gsvs_ring;
257
};
258
259
/* This must start from 16. */
/* features */
#define DBG_NO_CP_DMA (1 << 30)
/* shader backend */
#define DBG_NO_SB (1 << 21)
#define DBG_SB_CS (1 << 22)
#define DBG_SB_DRY_RUN (1 << 23)
#define DBG_SB_STAT (1 << 24)
#define DBG_SB_DUMP (1 << 25)
#define DBG_SB_NO_FALLBACK (1 << 26)
#define DBG_SB_DISASM (1 << 27)
#define DBG_SB_SAFEMATH (1 << 28)
/* NOTE(review): DBG_NIR_SB reuses bit 28 and therefore collides with
 * DBG_SB_SAFEMATH above — setting either debug option also enables the
 * other. Renumbering requires checking the bit allocation in
 * r600_pipe_common.h (bits below 16 and e.g. bit 30 are taken); confirm a
 * free bit before changing the value. */
#define DBG_NIR_SB (1 << 28)

#define DBG_NIR_PREFERRED (DBG_NIR_SB | DBG_NIR)
274
275
struct r600_screen {
276
struct r600_common_screen b;
277
bool has_msaa;
278
bool has_compressed_msaa_texturing;
279
bool has_atomics;
280
281
/*for compute global memory binding, we allocate stuff here, instead of
282
* buffers.
283
* XXX: Not sure if this is the best place for global_pool. Also,
284
* it's not thread safe, so it won't work with multiple contexts. */
285
struct compute_memory_pool *global_pool;
286
};
287
288
struct r600_pipe_sampler_view {
289
struct pipe_sampler_view base;
290
struct list_head list;
291
struct r600_resource *tex_resource;
292
uint32_t tex_resource_words[8];
293
bool skip_mip_address_reloc;
294
bool is_stencil_sampler;
295
};
296
297
struct r600_rasterizer_state {
298
struct r600_command_buffer buffer;
299
boolean flatshade;
300
boolean two_side;
301
unsigned sprite_coord_enable;
302
unsigned clip_plane_enable;
303
unsigned pa_sc_line_stipple;
304
unsigned pa_cl_clip_cntl;
305
unsigned pa_su_sc_mode_cntl;
306
float offset_units;
307
float offset_scale;
308
bool offset_enable;
309
bool offset_units_unscaled;
310
bool scissor_enable;
311
bool multisample_enable;
312
bool clip_halfz;
313
bool rasterizer_discard;
314
};
315
316
struct r600_poly_offset_state {
317
struct r600_atom atom;
318
enum pipe_format zs_format;
319
float offset_units;
320
float offset_scale;
321
bool offset_units_unscaled;
322
};
323
324
struct r600_blend_state {
325
struct r600_command_buffer buffer;
326
struct r600_command_buffer buffer_no_blend;
327
unsigned cb_target_mask;
328
unsigned cb_color_control;
329
unsigned cb_color_control_no_blend;
330
bool dual_src_blend;
331
bool alpha_to_one;
332
};
333
334
struct r600_dsa_state {
335
struct r600_command_buffer buffer;
336
unsigned alpha_ref;
337
ubyte valuemask[2];
338
ubyte writemask[2];
339
unsigned zwritemask;
340
unsigned sx_alpha_test_control;
341
};
342
343
struct r600_pipe_shader;
344
345
struct r600_pipe_shader_selector {
346
struct r600_pipe_shader *current;
347
348
struct tgsi_token *tokens;
349
struct nir_shader *nir;
350
struct pipe_stream_output_info so;
351
struct tgsi_shader_info info;
352
353
unsigned num_shaders;
354
355
enum pipe_shader_type type;
356
enum pipe_shader_ir ir_type;
357
358
/* geometry shader properties */
359
enum pipe_prim_type gs_output_prim;
360
unsigned gs_max_out_vertices;
361
unsigned gs_num_invocations;
362
363
/* TCS/VS */
364
uint64_t lds_patch_outputs_written_mask;
365
uint64_t lds_outputs_written_mask;
366
unsigned nr_ps_max_color_exports;
367
};
368
369
struct r600_pipe_sampler_state {
370
uint32_t tex_sampler_words[3];
371
union pipe_color_union border_color;
372
bool border_color_use;
373
bool seamless_cube_map;
374
};
375
376
/* needed for blitter save */
377
#define NUM_TEX_UNITS 16
378
379
struct r600_seamless_cube_map {
380
struct r600_atom atom;
381
bool enabled;
382
};
383
384
struct r600_samplerview_state {
385
struct r600_atom atom;
386
struct r600_pipe_sampler_view *views[NUM_TEX_UNITS];
387
uint32_t enabled_mask;
388
uint32_t dirty_mask;
389
uint32_t compressed_depthtex_mask; /* which textures are depth */
390
uint32_t compressed_colortex_mask;
391
boolean dirty_buffer_constants;
392
};
393
394
struct r600_sampler_states {
395
struct r600_atom atom;
396
struct r600_pipe_sampler_state *states[NUM_TEX_UNITS];
397
uint32_t enabled_mask;
398
uint32_t dirty_mask;
399
uint32_t has_bordercolor_mask; /* which states contain the border color */
400
};
401
402
struct r600_textures_info {
403
struct r600_samplerview_state views;
404
struct r600_sampler_states states;
405
bool is_array_sampler[NUM_TEX_UNITS];
406
};
407
408
struct r600_shader_driver_constants_info {
409
/* currently 128 bytes for UCP/samplepos + sampler buffer constants */
410
uint32_t *constants;
411
uint32_t alloc_size;
412
bool texture_const_dirty;
413
bool vs_ucp_dirty;
414
bool ps_sample_pos_dirty;
415
bool cs_block_grid_size_dirty;
416
bool tcs_default_levels_dirty;
417
};
418
419
struct r600_constbuf_state
420
{
421
struct r600_atom atom;
422
struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
423
uint32_t enabled_mask;
424
uint32_t dirty_mask;
425
};
426
427
struct r600_vertexbuf_state
428
{
429
struct r600_atom atom;
430
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
431
uint32_t enabled_mask; /* non-NULL buffers */
432
uint32_t dirty_mask;
433
};
434
435
/* CSO (constant state object, in other words, immutable state). */
436
struct r600_cso_state
437
{
438
struct r600_atom atom;
439
void *cso; /* e.g. r600_blend_state */
440
struct r600_command_buffer *cb;
441
};
442
443
struct r600_fetch_shader {
444
struct r600_resource *buffer;
445
unsigned offset;
446
};
447
448
struct r600_shader_state {
449
struct r600_atom atom;
450
struct r600_pipe_shader *shader;
451
};
452
453
struct r600_atomic_buffer_state {
454
struct pipe_shader_buffer buffer[EG_MAX_ATOMIC_BUFFERS];
455
};
456
457
struct r600_image_view {
458
struct pipe_image_view base;
459
uint32_t cb_color_base;
460
uint32_t cb_color_pitch;
461
uint32_t cb_color_slice;
462
uint32_t cb_color_view;
463
uint32_t cb_color_info;
464
uint32_t cb_color_attrib;
465
uint32_t cb_color_dim;
466
uint32_t cb_color_fmask;
467
uint32_t cb_color_fmask_slice;
468
uint32_t immed_resource_words[8];
469
uint32_t resource_words[8];
470
bool skip_mip_address_reloc;
471
uint32_t buf_size;
472
};
473
474
struct r600_image_state {
475
struct r600_atom atom;
476
uint32_t enabled_mask;
477
uint32_t dirty_mask;
478
uint32_t compressed_depthtex_mask;
479
uint32_t compressed_colortex_mask;
480
boolean dirty_buffer_constants;
481
struct r600_image_view views[R600_MAX_IMAGES];
482
};
483
484
/* Used to spill shader temps */
485
struct r600_scratch_buffer {
486
struct r600_resource *buffer;
487
boolean dirty;
488
unsigned size;
489
unsigned item_size;
490
};
491
492
struct r600_context {
493
struct r600_common_context b;
494
struct r600_screen *screen;
495
struct blitter_context *blitter;
496
struct u_suballocator allocator_fetch_shader;
497
498
/* Hardware info. */
499
boolean has_vertex_cache;
500
unsigned default_gprs[EG_NUM_HW_STAGES];
501
unsigned current_gprs[EG_NUM_HW_STAGES];
502
unsigned r6xx_num_clause_temp_gprs;
503
504
/* Miscellaneous state objects. */
505
void *custom_dsa_flush;
506
void *custom_blend_resolve;
507
void *custom_blend_decompress;
508
void *custom_blend_fastclear;
509
/* With rasterizer discard, there doesn't have to be a pixel shader.
510
* In that case, we bind this one: */
511
void *dummy_pixel_shader;
512
/* These dummy CMASK and FMASK buffers are used to get around the R6xx hardware
513
* bug where valid CMASK and FMASK are required to be present to avoid
514
* a hardlock in certain operations but aren't actually used
515
* for anything useful. */
516
struct r600_resource *dummy_fmask;
517
struct r600_resource *dummy_cmask;
518
519
/* State binding slots are here. */
520
struct r600_atom *atoms[R600_NUM_ATOMS];
521
/* Dirty atom bitmask for fast tests */
522
uint64_t dirty_atoms;
523
/* States for CS initialization. */
524
struct r600_command_buffer start_cs_cmd; /* invariant state mostly */
525
/** Compute specific registers initializations. The start_cs_cmd atom
526
* must be emitted before start_compute_cs_cmd. */
527
struct r600_command_buffer start_compute_cs_cmd;
528
/* Register states. */
529
struct r600_alphatest_state alphatest_state;
530
struct r600_cso_state blend_state;
531
struct r600_blend_color blend_color;
532
struct r600_cb_misc_state cb_misc_state;
533
struct r600_clip_misc_state clip_misc_state;
534
struct r600_clip_state clip_state;
535
struct r600_db_misc_state db_misc_state;
536
struct r600_db_state db_state;
537
struct r600_cso_state dsa_state;
538
struct r600_framebuffer framebuffer;
539
struct r600_poly_offset_state poly_offset_state;
540
struct r600_cso_state rasterizer_state;
541
struct r600_sample_mask sample_mask;
542
struct r600_seamless_cube_map seamless_cube_map;
543
struct r600_config_state config_state;
544
struct r600_stencil_ref_state stencil_ref;
545
struct r600_vgt_state vgt_state;
546
struct r600_atomic_buffer_state atomic_buffer_state;
547
/* only have images on fragment shader */
548
struct r600_image_state fragment_images;
549
struct r600_image_state compute_images;
550
struct r600_image_state fragment_buffers;
551
struct r600_image_state compute_buffers;
552
/* Shaders and shader resources. */
553
struct r600_cso_state vertex_fetch_shader;
554
struct r600_shader_state hw_shader_stages[EG_NUM_HW_STAGES];
555
struct r600_cs_shader_state cs_shader_state;
556
struct r600_shader_stages_state shader_stages;
557
struct r600_gs_rings_state gs_rings;
558
struct r600_constbuf_state constbuf_state[PIPE_SHADER_TYPES];
559
struct r600_textures_info samplers[PIPE_SHADER_TYPES];
560
561
struct r600_shader_driver_constants_info driver_consts[PIPE_SHADER_TYPES];
562
563
/** Vertex buffers for fetch shaders */
564
struct r600_vertexbuf_state vertex_buffer_state;
565
/** Vertex buffers for compute shaders */
566
struct r600_vertexbuf_state cs_vertex_buffer_state;
567
568
/* Additional context states. */
569
unsigned compute_cb_target_mask;
570
struct r600_pipe_shader_selector *ps_shader;
571
struct r600_pipe_shader_selector *vs_shader;
572
struct r600_pipe_shader_selector *gs_shader;
573
574
struct r600_pipe_shader_selector *tcs_shader;
575
struct r600_pipe_shader_selector *tes_shader;
576
577
struct r600_pipe_shader_selector *fixed_func_tcs_shader;
578
579
struct r600_rasterizer_state *rasterizer;
580
bool alpha_to_one;
581
bool force_blend_disable;
582
bool gs_tri_strip_adj_fix;
583
boolean dual_src_blend;
584
unsigned zwritemask;
585
unsigned ps_iter_samples;
586
587
/* The list of all texture buffer objects in this context.
588
* This list is walked when a buffer is invalidated/reallocated and
589
* the GPU addresses are updated. */
590
struct list_head texture_buffers;
591
592
/* Last draw state (-1 = unset). */
593
enum pipe_prim_type last_primitive_type; /* Last primitive type used in draw_vbo. */
594
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
595
enum pipe_prim_type last_rast_prim;
596
unsigned last_start_instance;
597
598
void *sb_context;
599
struct r600_isa *isa;
600
float sample_positions[4 * 16];
601
float tess_state[8];
602
uint32_t cs_block_grid_sizes[8]; /* 3 for grid + 1 pad, 3 for block + 1 pad*/
603
struct r600_pipe_shader_selector *last_ls;
604
struct r600_pipe_shader_selector *last_tcs;
605
unsigned last_num_tcs_input_cp;
606
unsigned lds_alloc;
607
608
struct r600_scratch_buffer scratch_buffers[MAX2(R600_NUM_HW_STAGES, EG_NUM_HW_STAGES)];
609
610
/* Debug state. */
611
bool is_debug;
612
struct radeon_saved_cs last_gfx;
613
struct r600_resource *last_trace_buf;
614
struct r600_resource *trace_buf;
615
unsigned trace_id;
616
617
bool cmd_buf_is_compute;
618
struct pipe_resource *append_fence;
619
uint32_t append_fence_id;
620
};
621
622
/* Copy a prebuilt command buffer verbatim into the CS.
 * The caller must already have reserved cb->num_dw dwords of CS space
 * (the assert enforces this in debug builds). */
static inline void r600_emit_command_buffer(struct radeon_cmdbuf *cs,
					    struct r600_command_buffer *cb)
{
	assert(cs->current.cdw + cb->num_dw <= cs->current.max_dw);
	/* 4 bytes per dword. */
	memcpy(cs->current.buf + cs->current.cdw, cb->buf, 4 * cb->num_dw);
	cs->current.cdw += cb->num_dw;
}
629
630
static inline void r600_set_atom_dirty(struct r600_context *rctx,
631
struct r600_atom *atom,
632
bool dirty)
633
{
634
uint64_t mask;
635
636
assert(atom->id != 0);
637
assert(atom->id < sizeof(mask) * 8);
638
mask = 1ull << atom->id;
639
if (dirty)
640
rctx->dirty_atoms |= mask;
641
else
642
rctx->dirty_atoms &= ~mask;
643
}
644
645
/* Convenience wrapper: flag an atom as dirty (see r600_set_atom_dirty). */
static inline void r600_mark_atom_dirty(struct r600_context *rctx,
					struct r600_atom *atom)
{
	r600_set_atom_dirty(rctx, atom, true);
}
650
651
/* Emit an atom's commands via its emit callback, then clear its dirty bit. */
static inline void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
{
	atom->emit(&rctx->b, atom);
	r600_set_atom_dirty(rctx, atom, false);
}
656
657
static inline void r600_set_cso_state(struct r600_context *rctx,
658
struct r600_cso_state *state, void *cso)
659
{
660
state->cso = cso;
661
r600_set_atom_dirty(rctx, &state->atom, cso != NULL);
662
}
663
664
/* Bind a CSO together with its prebuilt command buffer. The atom's dword
 * count mirrors the buffer's size so CS-space accounting stays correct;
 * a NULL cb sets it to 0. */
static inline void r600_set_cso_state_with_cb(struct r600_context *rctx,
					      struct r600_cso_state *state, void *cso,
					      struct r600_command_buffer *cb)
{
	state->cb = cb;
	state->atom.num_dw = cb ? cb->num_dw : 0;
	r600_set_cso_state(rctx, state, cso);
}
672
673
/* compute_memory_pool.c */
674
struct compute_memory_pool;
675
void compute_memory_pool_delete(struct compute_memory_pool* pool);
676
struct compute_memory_pool* compute_memory_pool_new(
677
struct r600_screen *rscreen);
678
679
/* evergreen_state.c */
680
struct pipe_sampler_view *
681
evergreen_create_sampler_view_custom(struct pipe_context *ctx,
682
struct pipe_resource *texture,
683
const struct pipe_sampler_view *state,
684
unsigned width0, unsigned height0,
685
unsigned force_level);
686
void evergreen_init_common_regs(struct r600_context *ctx,
687
struct r600_command_buffer *cb,
688
enum chip_class ctx_chip_class,
689
enum radeon_family ctx_family,
690
int ctx_drm_minor);
691
void cayman_init_common_regs(struct r600_command_buffer *cb,
692
enum chip_class ctx_chip_class,
693
enum radeon_family ctx_family,
694
int ctx_drm_minor);
695
696
void evergreen_init_state_functions(struct r600_context *rctx);
697
void evergreen_init_atom_start_cs(struct r600_context *rctx);
698
void evergreen_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
699
void evergreen_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
700
void evergreen_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
701
void evergreen_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
702
void evergreen_update_ls_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
703
void evergreen_update_hs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
704
void *evergreen_create_db_flush_dsa(struct r600_context *rctx);
705
void *evergreen_create_resolve_blend(struct r600_context *rctx);
706
void *evergreen_create_decompress_blend(struct r600_context *rctx);
707
void *evergreen_create_fastclear_blend(struct r600_context *rctx);
708
bool evergreen_is_format_supported(struct pipe_screen *screen,
709
enum pipe_format format,
710
enum pipe_texture_target target,
711
unsigned sample_count,
712
unsigned storage_sample_count,
713
unsigned usage);
714
void evergreen_init_color_surface(struct r600_context *rctx,
715
struct r600_surface *surf);
716
void evergreen_init_color_surface_rat(struct r600_context *rctx,
717
struct r600_surface *surf);
718
void evergreen_update_db_shader_control(struct r600_context * rctx);
719
bool evergreen_adjust_gprs(struct r600_context *rctx);
720
void evergreen_setup_scratch_buffers(struct r600_context *rctx);
721
uint32_t evergreen_construct_rat_mask(struct r600_context *rctx, struct r600_cb_misc_state *a,
722
unsigned nr_cbufs);
723
/* r600_blit.c */
724
void r600_init_blit_functions(struct r600_context *rctx);
725
void r600_decompress_depth_textures(struct r600_context *rctx,
726
struct r600_samplerview_state *textures);
727
void r600_decompress_depth_images(struct r600_context *rctx,
728
struct r600_image_state *images);
729
void r600_decompress_color_textures(struct r600_context *rctx,
730
struct r600_samplerview_state *textures);
731
void r600_decompress_color_images(struct r600_context *rctx,
732
struct r600_image_state *images);
733
void r600_resource_copy_region(struct pipe_context *ctx,
734
struct pipe_resource *dst,
735
unsigned dst_level,
736
unsigned dstx, unsigned dsty, unsigned dstz,
737
struct pipe_resource *src,
738
unsigned src_level,
739
const struct pipe_box *src_box);
740
741
/* r600_shader.c */
742
int r600_pipe_shader_create(struct pipe_context *ctx,
743
struct r600_pipe_shader *shader,
744
union r600_shader_key key);
745
746
void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader);
747
748
/* r600_state.c */
749
struct pipe_sampler_view *
750
r600_create_sampler_view_custom(struct pipe_context *ctx,
751
struct pipe_resource *texture,
752
const struct pipe_sampler_view *state,
753
unsigned width_first_level, unsigned height_first_level);
754
void r600_init_state_functions(struct r600_context *rctx);
755
void r600_init_atom_start_cs(struct r600_context *rctx);
756
void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
757
void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
758
void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
759
void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
760
void *r600_create_db_flush_dsa(struct r600_context *rctx);
761
void *r600_create_resolve_blend(struct r600_context *rctx);
762
void *r700_create_resolve_blend(struct r600_context *rctx);
763
void *r600_create_decompress_blend(struct r600_context *rctx);
764
bool r600_adjust_gprs(struct r600_context *rctx);
765
bool r600_is_format_supported(struct pipe_screen *screen,
766
enum pipe_format format,
767
enum pipe_texture_target target,
768
unsigned sample_count,
769
unsigned storage_sample_count,
770
unsigned usage);
771
void r600_update_db_shader_control(struct r600_context * rctx);
772
void r600_setup_scratch_buffers(struct r600_context *rctx);
773
774
/* r600_hw_context.c */
775
void r600_context_gfx_flush(void *context, unsigned flags,
776
struct pipe_fence_handle **fence);
777
void r600_begin_new_cs(struct r600_context *ctx);
778
void r600_flush_emit(struct r600_context *ctx);
779
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in, unsigned num_atomics);
780
void r600_emit_pfp_sync_me(struct r600_context *rctx);
781
void r600_cp_dma_copy_buffer(struct r600_context *rctx,
782
struct pipe_resource *dst, uint64_t dst_offset,
783
struct pipe_resource *src, uint64_t src_offset,
784
unsigned size);
785
void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
786
struct pipe_resource *dst, uint64_t offset,
787
unsigned size, uint32_t clear_value,
788
enum r600_coherency coher);
789
void r600_dma_copy_buffer(struct r600_context *rctx,
790
struct pipe_resource *dst,
791
struct pipe_resource *src,
792
uint64_t dst_offset,
793
uint64_t src_offset,
794
uint64_t size);
795
796
/*
797
* evergreen_hw_context.c
798
*/
799
void evergreen_dma_copy_buffer(struct r600_context *rctx,
800
struct pipe_resource *dst,
801
struct pipe_resource *src,
802
uint64_t dst_offset,
803
uint64_t src_offset,
804
uint64_t size);
805
void evergreen_setup_tess_constants(struct r600_context *rctx,
806
const struct pipe_draw_info *info,
807
unsigned *num_patches);
808
uint32_t evergreen_get_ls_hs_config(struct r600_context *rctx,
809
const struct pipe_draw_info *info,
810
unsigned num_patches);
811
void evergreen_set_ls_hs_config(struct r600_context *rctx,
812
struct radeon_cmdbuf *cs,
813
uint32_t ls_hs_config);
814
void evergreen_set_lds_alloc(struct r600_context *rctx,
815
struct radeon_cmdbuf *cs,
816
uint32_t lds_alloc);
817
818
/* r600_state_common.c */
819
void r600_init_common_state_functions(struct r600_context *rctx);
820
void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom);
821
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom);
822
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom);
823
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom);
824
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom);
825
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom);
826
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a);
827
void r600_add_atom(struct r600_context *rctx, struct r600_atom *atom, unsigned id);
828
void r600_init_atom(struct r600_context *rctx, struct r600_atom *atom, unsigned id,
829
void (*emit)(struct r600_context *ctx, struct r600_atom *state),
830
unsigned num_dw);
831
void r600_vertex_buffers_dirty(struct r600_context *rctx);
832
void r600_sampler_views_dirty(struct r600_context *rctx,
833
struct r600_samplerview_state *state);
834
void r600_sampler_states_dirty(struct r600_context *rctx,
835
struct r600_sampler_states *state);
836
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state);
837
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx);
838
void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
839
struct r600_pipe_shader *shader, struct r600_scratch_buffer *scratch,
840
unsigned ring_base_reg, unsigned item_size_reg, unsigned ring_size_reg);
841
uint32_t r600_translate_stencil_op(int s_op);
842
uint32_t r600_translate_fill(uint32_t func);
843
unsigned r600_tex_wrap(unsigned wrap);
844
unsigned r600_tex_mipfilter(unsigned filter);
845
unsigned r600_tex_compare(unsigned compare);
846
bool sampler_state_needs_border_color(const struct pipe_sampler_state *state);
847
unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
848
const unsigned char *swizzle_view,
849
boolean vtx);
850
uint32_t r600_translate_texformat(struct pipe_screen *screen, enum pipe_format format,
851
const unsigned char *swizzle_view,
852
uint32_t *word4_p, uint32_t *yuv_format_p,
853
bool do_endian_swap);
854
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format,
855
bool do_endian_swap);
856
uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap);
857
858
/* r600_uvd.c */
859
struct pipe_video_codec *r600_uvd_create_decoder(struct pipe_context *context,
860
const struct pipe_video_codec *decoder);
861
862
struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe,
863
const struct pipe_video_buffer *tmpl);
864
865
/*
866
* Helpers for building command buffers
867
*/
868
869
/* PM4 type-3 packet opcodes for programming register/constant ranges. */
#define PKT3_SET_CONFIG_REG	0x68
#define PKT3_SET_CONTEXT_REG	0x69
#define PKT3_SET_CTL_CONST	0x6F
#define PKT3_SET_LOOP_CONST	0x6C

/* Base dword offsets of each register block; SET_* packets encode
 * (reg - base) >> 2 in their first payload dword. */
#define R600_CONFIG_REG_OFFSET	0x08000
#define R600_CONTEXT_REG_OFFSET	0x28000
#define R600_CTL_CONST_OFFSET	0x3CFF0
#define R600_LOOP_CONST_OFFSET	0X0003E200
#define EG_LOOP_CONST_OFFSET	0x0003A200

/* Fields of a PM4 packet header. */
#define PKT_TYPE_S(x)		(((unsigned)(x) & 0x3) << 30)
#define PKT_COUNT_S(x)		(((unsigned)(x) & 0x3FFF) << 16)
#define PKT3_IT_OPCODE_S(x)	(((unsigned)(x) & 0xFF) << 8)
#define PKT3_PREDICATE(x)	(((x) >> 0) & 0x1)
#define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))

#define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002

/* Evergreen compute packet3: same as PKT3 with the compute-mode bit set. */
#define PKT3C(op, count, predicate) (PKT_TYPE_S(3) | PKT3_IT_OPCODE_S(op) | PKT_COUNT_S(count) | PKT3_PREDICATE(predicate) | RADEON_CP_PACKET3_COMPUTE_MODE)
static inline void r600_store_value(struct r600_command_buffer *cb, unsigned value)
892
{
893
cb->buf[cb->num_dw++] = value;
894
}
895
896
static inline void r600_store_array(struct r600_command_buffer *cb, unsigned num, unsigned *ptr)
897
{
898
assert(cb->num_dw+num <= cb->max_num_dw);
899
memcpy(&cb->buf[cb->num_dw], ptr, num * sizeof(ptr[0]));
900
cb->num_dw += num;
901
}
902
903
static inline void r600_store_config_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
904
{
905
assert(reg < R600_CONTEXT_REG_OFFSET);
906
assert(cb->num_dw+2+num <= cb->max_num_dw);
907
cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
908
cb->buf[cb->num_dw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
909
}
910
911
/**
912
* Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
913
* shaders.
914
*/
915
static inline void r600_store_context_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
916
{
917
assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
918
assert(cb->num_dw+2+num <= cb->max_num_dw);
919
cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0) | cb->pkt_flags;
920
cb->buf[cb->num_dw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
921
}
922
923
/**
924
* Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
925
* shaders.
926
*/
927
static inline void r600_store_ctl_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
928
{
929
assert(reg >= R600_CTL_CONST_OFFSET);
930
assert(cb->num_dw+2+num <= cb->max_num_dw);
931
cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CTL_CONST, num, 0) | cb->pkt_flags;
932
cb->buf[cb->num_dw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
933
}
934
935
static inline void r600_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
936
{
937
assert(reg >= R600_LOOP_CONST_OFFSET);
938
assert(cb->num_dw+2+num <= cb->max_num_dw);
939
cb->buf[cb->num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0);
940
cb->buf[cb->num_dw++] = (reg - R600_LOOP_CONST_OFFSET) >> 2;
941
}
942
943
/**
944
* Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
945
* shaders.
946
*/
947
static inline void eg_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
948
{
949
assert(reg >= EG_LOOP_CONST_OFFSET);
950
assert(cb->num_dw+2+num <= cb->max_num_dw);
951
cb->buf[cb->num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0) | cb->pkt_flags;
952
cb->buf[cb->num_dw++] = (reg - EG_LOOP_CONST_OFFSET) >> 2;
953
}
954
955
/* Write a single config register. */
static inline void r600_store_config_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_config_reg_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
/* Write a single context register (honors cb->pkt_flags for compute). */
static inline void r600_store_context_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_context_reg_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
/* Write a single CTL constant (honors cb->pkt_flags for compute). */
static inline void r600_store_ctl_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_ctl_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
/* Write a single loop constant (R600 base). */
static inline void r600_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_loop_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
/* Write a single loop constant (Evergreen base; honors cb->pkt_flags). */
static inline void eg_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	eg_store_loop_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw);
void r600_release_command_buffer(struct r600_command_buffer *cb);
static inline void radeon_compute_set_context_reg_seq(struct radeon_cmdbuf *cs, unsigned reg, unsigned num)
989
{
990
radeon_set_context_reg_seq(cs, reg, num);
991
/* Set the compute bit on the packet header */
992
cs->current.buf[cs->current.cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE;
993
}
994
995
static inline void radeon_set_ctl_const_seq(struct radeon_cmdbuf *cs, unsigned reg, unsigned num)
996
{
997
assert(reg >= R600_CTL_CONST_OFFSET);
998
assert(cs->current.cdw + 2 + num <= cs->current.max_dw);
999
radeon_emit(cs, PKT3(PKT3_SET_CTL_CONST, num, 0));
1000
radeon_emit(cs, (reg - R600_CTL_CONST_OFFSET) >> 2);
1001
}
1002
1003
/* Write a single context register in compute mode. */
static inline void radeon_compute_set_context_reg(struct radeon_cmdbuf *cs, unsigned reg, unsigned value)
{
	radeon_compute_set_context_reg_seq(cs, reg, 1);
	radeon_emit(cs, value);
}
static inline void radeon_set_context_reg_flag(struct radeon_cmdbuf *cs, unsigned reg, unsigned value, unsigned flag)
1010
{
1011
if (flag & RADEON_CP_PACKET3_COMPUTE_MODE) {
1012
radeon_compute_set_context_reg(cs, reg, value);
1013
} else {
1014
radeon_set_context_reg(cs, reg, value);
1015
}
1016
}
1017
1018
/* Write a single CTL constant directly on a radeon_cmdbuf. */
static inline void radeon_set_ctl_const(struct radeon_cmdbuf *cs, unsigned reg, unsigned value)
{
	radeon_set_ctl_const_seq(cs, reg, 1);
	radeon_emit(cs, value);
}
/*
 * common helpers
 */

/* Convert a float to 12.4 unsigned fixed point, clamping the result to
 * the representable range [0, 0xffff]. */
static inline unsigned r600_pack_float_12p4(float x)
{
	return x <= 0 ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}
static inline unsigned r600_get_flush_flags(enum r600_coherency coher)
1036
{
1037
switch (coher) {
1038
default:
1039
case R600_COHERENCY_NONE:
1040
return 0;
1041
case R600_COHERENCY_SHADER:
1042
return R600_CONTEXT_INV_CONST_CACHE |
1043
R600_CONTEXT_INV_VERTEX_CACHE |
1044
R600_CONTEXT_INV_TEX_CACHE |
1045
R600_CONTEXT_STREAMOUT_FLUSH;
1046
case R600_COHERENCY_CB_META:
1047
return R600_CONTEXT_FLUSH_AND_INV_CB |
1048
R600_CONTEXT_FLUSH_AND_INV_CB_META;
1049
}
1050
}
1051
1052
#define V_028A6C_OUTPRIM_TYPE_POINTLIST 0
#define V_028A6C_OUTPRIM_TYPE_LINESTRIP 1
#define V_028A6C_OUTPRIM_TYPE_TRISTRIP 2

unsigned r600_conv_prim_to_gs_out(unsigned mode);

void eg_trace_emit(struct r600_context *rctx);
void eg_dump_debug_state(struct pipe_context *ctx, FILE *f,
			 unsigned flags);

struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
								  const void *tokens,
								  enum pipe_shader_ir,
								  unsigned pipe_shader_type);
int r600_shader_select(struct pipe_context *ctx,
		       struct r600_pipe_shader_selector* sel,
		       bool *dirty);

void r600_delete_shader_selector(struct pipe_context *ctx,
				 struct r600_pipe_shader_selector *sel);

struct r600_shader_atomic;
void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
					      struct r600_pipe_shader *cs_shader,
					      struct r600_shader_atomic *combined_atomics,
					      uint8_t *atomic_used_mask_p);
void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
					bool is_compute,
					struct r600_shader_atomic *combined_atomics,
					uint8_t atomic_used_mask);
void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
				       bool is_compute,
				       struct r600_shader_atomic *combined_atomics,
				       uint8_t *atomic_used_mask_p);
void r600_update_compressed_resource_state(struct r600_context *rctx, bool compute_only);

void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type);
void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_only);
#endif