GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/compiler/brw_eu_defines.h
1
/*
2
Copyright (C) Intel Corp. 2006. All Rights Reserved.
3
Intel funded Tungsten Graphics to
4
develop this 3D driver.
5
6
Permission is hereby granted, free of charge, to any person obtaining
7
a copy of this software and associated documentation files (the
8
"Software"), to deal in the Software without restriction, including
9
without limitation the rights to use, copy, modify, merge, publish,
10
distribute, sublicense, and/or sell copies of the Software, and to
11
permit persons to whom the Software is furnished to do so, subject to
12
the following conditions:
13
14
The above copyright notice and this permission notice (including the
15
next paragraph) shall be included in all copies or substantial
16
portions of the Software.
17
18
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26
**********************************************************************/
27
/*
28
* Authors:
29
* Keith Whitwell <[email protected]>
30
*/
31
32
#ifndef BRW_EU_DEFINES_H
33
#define BRW_EU_DEFINES_H
34
35
#include <stdint.h>
36
#include <stdlib.h>
37
#include "util/macros.h"
38
#include "dev/intel_device_info.h"
39
40
/* The following hunk, up to "Execution Unit", is used by both the
41
* intel/compiler and i965 codebases. */
42
43
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
44
/* Using the GNU statement expression extension */
45
#define SET_FIELD(value, field) \
46
({ \
47
uint32_t fieldval = (uint32_t)(value) << field ## _SHIFT; \
48
assert((fieldval & ~ field ## _MASK) == 0); \
49
fieldval & field ## _MASK; \
50
})
51
52
#define SET_BITS(value, high, low) \
53
({ \
54
const uint32_t fieldval = (uint32_t)(value) << (low); \
55
assert((fieldval & ~INTEL_MASK(high, low)) == 0); \
56
fieldval & INTEL_MASK(high, low); \
57
})
58
59
#define GET_BITS(data, high, low) ((data & INTEL_MASK((high), (low))) >> (low))
60
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
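/* Illustrative sketch (editor's example, not from the original header):
* SET_FIELD/GET_FIELD are meant to pair with a FOO_MASK/FOO_SHIFT definition
* built from INTEL_MASK. The EXAMPLE_* names below are hypothetical and exist
* only to show the intended usage pattern.
*
*    #define EXAMPLE_MLEN_SHIFT 25
*    #define EXAMPLE_MLEN_MASK  INTEL_MASK(28, 25)
*
*    uint32_t desc = 0;
*    desc |= SET_FIELD(4, EXAMPLE_MLEN);      // place 4 into bits 28:25
*    assert(GET_FIELD(desc, EXAMPLE_MLEN) == 4);
*    assert(GET_BITS(desc, 28, 25) == 4);     // same bits, positional form
*/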
61
62
#define _3DPRIM_POINTLIST 0x01
63
#define _3DPRIM_LINELIST 0x02
64
#define _3DPRIM_LINESTRIP 0x03
65
#define _3DPRIM_TRILIST 0x04
66
#define _3DPRIM_TRISTRIP 0x05
67
#define _3DPRIM_TRIFAN 0x06
68
#define _3DPRIM_QUADLIST 0x07
69
#define _3DPRIM_QUADSTRIP 0x08
70
#define _3DPRIM_LINELIST_ADJ 0x09 /* G45+ */
71
#define _3DPRIM_LINESTRIP_ADJ 0x0A /* G45+ */
72
#define _3DPRIM_TRILIST_ADJ 0x0B /* G45+ */
73
#define _3DPRIM_TRISTRIP_ADJ 0x0C /* G45+ */
74
#define _3DPRIM_TRISTRIP_REVERSE 0x0D
75
#define _3DPRIM_POLYGON 0x0E
76
#define _3DPRIM_RECTLIST 0x0F
77
#define _3DPRIM_LINELOOP 0x10
78
#define _3DPRIM_POINTLIST_BF 0x11
79
#define _3DPRIM_LINESTRIP_CONT 0x12
80
#define _3DPRIM_LINESTRIP_BF 0x13
81
#define _3DPRIM_LINESTRIP_CONT_BF 0x14
82
#define _3DPRIM_TRIFAN_NOSTIPPLE 0x16
83
#define _3DPRIM_PATCHLIST(n) ({ assert(n > 0 && n <= 32); 0x20 + (n - 1); })
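/* Editor's note (illustrative, not from the original header): the patch-list
* encoding is 0x20-based, so e.g. _3DPRIM_PATCHLIST(1) == 0x20 and
* _3DPRIM_PATCHLIST(4) == 0x23.
*/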
84
85
/* Bitfields for the URB_WRITE message, DW2 of message header: */
86
#define URB_WRITE_PRIM_END 0x1
87
#define URB_WRITE_PRIM_START 0x2
88
#define URB_WRITE_PRIM_TYPE_SHIFT 2
89
90
#define BRW_SPRITE_POINT_ENABLE 16
91
92
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT 0
93
# define GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1
94
95
/* Execution Unit (EU) defines
96
*/
97
98
#define BRW_ALIGN_1 0
99
#define BRW_ALIGN_16 1
100
101
#define BRW_ADDRESS_DIRECT 0
102
#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER 1
103
104
#define BRW_CHANNEL_X 0
105
#define BRW_CHANNEL_Y 1
106
#define BRW_CHANNEL_Z 2
107
#define BRW_CHANNEL_W 3
108
109
enum brw_compression {
110
BRW_COMPRESSION_NONE = 0,
111
BRW_COMPRESSION_2NDHALF = 1,
112
BRW_COMPRESSION_COMPRESSED = 2,
113
};
114
115
#define GFX6_COMPRESSION_1Q 0
116
#define GFX6_COMPRESSION_2Q 1
117
#define GFX6_COMPRESSION_3Q 2
118
#define GFX6_COMPRESSION_4Q 3
119
#define GFX6_COMPRESSION_1H 0
120
#define GFX6_COMPRESSION_2H 2
121
122
enum PACKED brw_conditional_mod {
123
BRW_CONDITIONAL_NONE = 0,
124
BRW_CONDITIONAL_Z = 1,
125
BRW_CONDITIONAL_NZ = 2,
126
BRW_CONDITIONAL_EQ = 1, /* Z */
127
BRW_CONDITIONAL_NEQ = 2, /* NZ */
128
BRW_CONDITIONAL_G = 3,
129
BRW_CONDITIONAL_GE = 4,
130
BRW_CONDITIONAL_L = 5,
131
BRW_CONDITIONAL_LE = 6,
132
BRW_CONDITIONAL_R = 7, /* Gen <= 5 */
133
BRW_CONDITIONAL_O = 8,
134
BRW_CONDITIONAL_U = 9,
135
};
136
137
#define BRW_DEBUG_NONE 0
138
#define BRW_DEBUG_BREAKPOINT 1
139
140
#define BRW_DEPENDENCY_NORMAL 0
141
#define BRW_DEPENDENCY_NOTCLEARED 1
142
#define BRW_DEPENDENCY_NOTCHECKED 2
143
#define BRW_DEPENDENCY_DISABLE 3
144
145
enum PACKED brw_execution_size {
146
BRW_EXECUTE_1 = 0,
147
BRW_EXECUTE_2 = 1,
148
BRW_EXECUTE_4 = 2,
149
BRW_EXECUTE_8 = 3,
150
BRW_EXECUTE_16 = 4,
151
BRW_EXECUTE_32 = 5,
152
};
153
154
enum PACKED brw_horizontal_stride {
155
BRW_HORIZONTAL_STRIDE_0 = 0,
156
BRW_HORIZONTAL_STRIDE_1 = 1,
157
BRW_HORIZONTAL_STRIDE_2 = 2,
158
BRW_HORIZONTAL_STRIDE_4 = 3,
159
};
160
161
enum PACKED gfx10_align1_3src_src_horizontal_stride {
162
BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0 = 0,
163
BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1 = 1,
164
BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2 = 2,
165
BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4 = 3,
166
};
167
168
enum PACKED gfx10_align1_3src_dst_horizontal_stride {
169
BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1 = 0,
170
BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_2 = 1,
171
};
172
173
#define BRW_INSTRUCTION_NORMAL 0
174
#define BRW_INSTRUCTION_SATURATE 1
175
176
#define BRW_MASK_ENABLE 0
177
#define BRW_MASK_DISABLE 1
178
179
/** @{
180
*
181
* Gfx6 has replaced "mask enable/disable" with WECtrl, which is
182
* effectively the same but much simpler to think about. Now, there
183
* are two contributors ANDed together to determine whether channels are
184
* executed: The predication on the instruction, and the channel write
185
* enable.
186
*/
187
/**
188
* This is the default value. It means that a channel's write enable is set
189
* if the per-channel IP is pointing at this instruction.
190
*/
191
#define BRW_WE_NORMAL 0
192
/**
193
* This is used like BRW_MASK_DISABLE, and causes all channels to have
194
* their write enable set. Note that predication still contributes to
195
* whether the channel actually gets written.
196
*/
197
#define BRW_WE_ALL 1
198
/** @} */
199
200
enum opcode {
201
/* These are the actual hardware instructions. */
202
BRW_OPCODE_ILLEGAL,
203
BRW_OPCODE_SYNC,
204
BRW_OPCODE_MOV,
205
BRW_OPCODE_SEL,
206
BRW_OPCODE_MOVI, /**< G45+ */
207
BRW_OPCODE_NOT,
208
BRW_OPCODE_AND,
209
BRW_OPCODE_OR,
210
BRW_OPCODE_XOR,
211
BRW_OPCODE_SHR,
212
BRW_OPCODE_SHL,
213
BRW_OPCODE_DIM, /**< Gfx7.5 only */
214
BRW_OPCODE_SMOV, /**< Gfx8+ */
215
BRW_OPCODE_ASR,
216
BRW_OPCODE_ROR, /**< Gfx11+ */
217
BRW_OPCODE_ROL, /**< Gfx11+ */
218
BRW_OPCODE_CMP,
219
BRW_OPCODE_CMPN,
220
BRW_OPCODE_CSEL, /**< Gfx8+ */
221
BRW_OPCODE_F32TO16, /**< Gfx7 only */
222
BRW_OPCODE_F16TO32, /**< Gfx7 only */
223
BRW_OPCODE_BFREV, /**< Gfx7+ */
224
BRW_OPCODE_BFE, /**< Gfx7+ */
225
BRW_OPCODE_BFI1, /**< Gfx7+ */
226
BRW_OPCODE_BFI2, /**< Gfx7+ */
227
BRW_OPCODE_JMPI,
228
BRW_OPCODE_BRD, /**< Gfx7+ */
229
BRW_OPCODE_IF,
230
BRW_OPCODE_IFF, /**< Pre-Gfx6 */
231
BRW_OPCODE_BRC, /**< Gfx7+ */
232
BRW_OPCODE_ELSE,
233
BRW_OPCODE_ENDIF,
234
BRW_OPCODE_DO, /**< Pre-Gfx6 */
235
BRW_OPCODE_CASE, /**< Gfx6 only */
236
BRW_OPCODE_WHILE,
237
BRW_OPCODE_BREAK,
238
BRW_OPCODE_CONTINUE,
239
BRW_OPCODE_HALT,
240
BRW_OPCODE_CALLA, /**< Gfx7.5+ */
241
BRW_OPCODE_MSAVE, /**< Pre-Gfx6 */
242
BRW_OPCODE_CALL, /**< Gfx6+ */
243
BRW_OPCODE_MREST, /**< Pre-Gfx6 */
244
BRW_OPCODE_RET, /**< Gfx6+ */
245
BRW_OPCODE_PUSH, /**< Pre-Gfx6 */
246
BRW_OPCODE_FORK, /**< Gfx6 only */
247
BRW_OPCODE_GOTO, /**< Gfx8+ */
248
BRW_OPCODE_POP, /**< Pre-Gfx6 */
249
BRW_OPCODE_WAIT,
250
BRW_OPCODE_SEND,
251
BRW_OPCODE_SENDC,
252
BRW_OPCODE_SENDS, /**< Gfx9+ */
253
BRW_OPCODE_SENDSC, /**< Gfx9+ */
254
BRW_OPCODE_MATH, /**< Gfx6+ */
255
BRW_OPCODE_ADD,
256
BRW_OPCODE_MUL,
257
BRW_OPCODE_AVG,
258
BRW_OPCODE_FRC,
259
BRW_OPCODE_RNDU,
260
BRW_OPCODE_RNDD,
261
BRW_OPCODE_RNDE,
262
BRW_OPCODE_RNDZ,
263
BRW_OPCODE_MAC,
264
BRW_OPCODE_MACH,
265
BRW_OPCODE_LZD,
266
BRW_OPCODE_FBH, /**< Gfx7+ */
267
BRW_OPCODE_FBL, /**< Gfx7+ */
268
BRW_OPCODE_CBIT, /**< Gfx7+ */
269
BRW_OPCODE_ADDC, /**< Gfx7+ */
270
BRW_OPCODE_SUBB, /**< Gfx7+ */
271
BRW_OPCODE_SAD2,
272
BRW_OPCODE_SADA2,
273
BRW_OPCODE_DP4,
274
BRW_OPCODE_DPH,
275
BRW_OPCODE_DP3,
276
BRW_OPCODE_DP2,
277
BRW_OPCODE_LINE,
278
BRW_OPCODE_PLN, /**< G45+ */
279
BRW_OPCODE_MAD, /**< Gfx6+ */
280
BRW_OPCODE_LRP, /**< Gfx6+ */
281
BRW_OPCODE_MADM, /**< Gfx8+ */
282
BRW_OPCODE_NENOP, /**< G45 only */
283
BRW_OPCODE_NOP,
284
285
NUM_BRW_OPCODES,
286
287
/* These are compiler backend opcodes that get translated into other
288
* instructions.
289
*/
290
FS_OPCODE_FB_WRITE = NUM_BRW_OPCODES,
291
292
/**
293
* Same as FS_OPCODE_FB_WRITE but expects its arguments separately as
294
* individual sources instead of as a single payload blob. The
295
* position/ordering of the arguments are defined by the enum
296
* fb_write_logical_srcs.
297
*/
298
FS_OPCODE_FB_WRITE_LOGICAL,
299
300
FS_OPCODE_REP_FB_WRITE,
301
302
FS_OPCODE_FB_READ,
303
FS_OPCODE_FB_READ_LOGICAL,
304
305
SHADER_OPCODE_RCP,
306
SHADER_OPCODE_RSQ,
307
SHADER_OPCODE_SQRT,
308
SHADER_OPCODE_EXP2,
309
SHADER_OPCODE_LOG2,
310
SHADER_OPCODE_POW,
311
SHADER_OPCODE_INT_QUOTIENT,
312
SHADER_OPCODE_INT_REMAINDER,
313
SHADER_OPCODE_SIN,
314
SHADER_OPCODE_COS,
315
316
/**
317
* A generic "send" opcode. The first two sources are the message
318
* descriptor and extended message descriptor respectively. The third
319
* and optional fourth sources are the message payload.
320
*/
321
SHADER_OPCODE_SEND,
322
323
/**
324
* An "undefined" write which does nothing but indicates to liveness that
325
* we don't care about any values in the register which predate this
326
* instruction. Used to prevent partial writes from causing issues with
327
* live ranges.
328
*/
329
SHADER_OPCODE_UNDEF,
330
331
/**
332
* Texture sampling opcodes.
333
*
334
* LOGICAL opcodes are eventually translated to the matching non-LOGICAL
335
* opcode but instead of taking a single payload blob they expect their
336
* arguments separately as individual sources. The position/ordering of the
337
* arguments are defined by the enum tex_logical_srcs.
338
*/
339
SHADER_OPCODE_TEX,
340
SHADER_OPCODE_TEX_LOGICAL,
341
SHADER_OPCODE_TXD,
342
SHADER_OPCODE_TXD_LOGICAL,
343
SHADER_OPCODE_TXF,
344
SHADER_OPCODE_TXF_LOGICAL,
345
SHADER_OPCODE_TXF_LZ,
346
SHADER_OPCODE_TXL,
347
SHADER_OPCODE_TXL_LOGICAL,
348
SHADER_OPCODE_TXL_LZ,
349
SHADER_OPCODE_TXS,
350
SHADER_OPCODE_TXS_LOGICAL,
351
FS_OPCODE_TXB,
352
FS_OPCODE_TXB_LOGICAL,
353
SHADER_OPCODE_TXF_CMS,
354
SHADER_OPCODE_TXF_CMS_LOGICAL,
355
SHADER_OPCODE_TXF_CMS_W,
356
SHADER_OPCODE_TXF_CMS_W_LOGICAL,
357
SHADER_OPCODE_TXF_UMS,
358
SHADER_OPCODE_TXF_UMS_LOGICAL,
359
SHADER_OPCODE_TXF_MCS,
360
SHADER_OPCODE_TXF_MCS_LOGICAL,
361
SHADER_OPCODE_LOD,
362
SHADER_OPCODE_LOD_LOGICAL,
363
SHADER_OPCODE_TG4,
364
SHADER_OPCODE_TG4_LOGICAL,
365
SHADER_OPCODE_TG4_OFFSET,
366
SHADER_OPCODE_TG4_OFFSET_LOGICAL,
367
SHADER_OPCODE_SAMPLEINFO,
368
SHADER_OPCODE_SAMPLEINFO_LOGICAL,
369
370
SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
371
372
/**
373
* Combines multiple sources of size 1 into a larger virtual GRF.
374
* For example, parameters for a send-from-GRF message. Or, updating
375
* channels of a size 4 VGRF used to store vec4s such as texturing results.
376
*
377
* This will be lowered into MOVs from each source to consecutive offsets
378
* of the destination VGRF.
379
*
380
* src[0] may be BAD_FILE. If so, the lowering pass skips emitting the MOV,
381
* but still reserves the first channel of the destination VGRF. This can be
382
* used to reserve space for, say, a message header set up by the generators.
383
*/
384
SHADER_OPCODE_LOAD_PAYLOAD,
385
386
/**
387
* Packs a number of sources into a single value. Unlike LOAD_PAYLOAD, this
388
* acts intra-channel, obtaining the final value for each channel by
389
* combining the source values for the same channel, the first source
390
* occupying the lowest bits and the last source occupying the highest
391
* bits.
392
*/
393
FS_OPCODE_PACK,
394
395
SHADER_OPCODE_SHADER_TIME_ADD,
396
397
/**
398
* Typed and untyped surface access opcodes.
399
*
400
* LOGICAL opcodes are eventually translated to the matching non-LOGICAL
401
* opcode but instead of taking a single payload blob they expect their
402
* arguments separately as individual sources:
403
*
404
* Source 0: [required] Surface coordinates.
405
* Source 1: [optional] Operation source.
406
* Source 2: [required] Surface index.
407
* Source 3: [required] Number of coordinate components (as UD immediate).
408
* Source 4: [required] Opcode-specific control immediate, same as source 2
409
* of the matching non-LOGICAL opcode.
410
*/
411
VEC4_OPCODE_UNTYPED_ATOMIC,
412
SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
413
SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
414
VEC4_OPCODE_UNTYPED_SURFACE_READ,
415
SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
416
VEC4_OPCODE_UNTYPED_SURFACE_WRITE,
417
SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
418
419
SHADER_OPCODE_OWORD_BLOCK_READ_LOGICAL,
420
SHADER_OPCODE_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
421
SHADER_OPCODE_OWORD_BLOCK_WRITE_LOGICAL,
422
423
/**
424
* Untyped A64 surface access opcodes.
425
*
426
* Source 0: 64-bit address
427
* Source 1: Operational source
428
* Source 2: [required] Opcode-specific control immediate, same as source 2
429
* of the matching non-LOGICAL opcode.
430
*/
431
SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
432
SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
433
SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
434
SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
435
SHADER_OPCODE_A64_OWORD_BLOCK_READ_LOGICAL,
436
SHADER_OPCODE_A64_UNALIGNED_OWORD_BLOCK_READ_LOGICAL,
437
SHADER_OPCODE_A64_OWORD_BLOCK_WRITE_LOGICAL,
438
SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
439
SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT16_LOGICAL,
440
SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL,
441
SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT16_LOGICAL,
442
SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT32_LOGICAL,
443
444
SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
445
SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
446
SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,
447
448
SHADER_OPCODE_RND_MODE,
449
SHADER_OPCODE_FLOAT_CONTROL_MODE,
450
451
/**
452
* Byte scattered write/read opcodes.
453
*
454
* LOGICAL opcodes are eventually translated to the matching non-LOGICAL
455
* opcode, but instead of taking a single payload blob they expect their
456
* arguments separately as individual sources, like untyped write/read.
457
*/
458
SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
459
SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
460
SHADER_OPCODE_DWORD_SCATTERED_READ_LOGICAL,
461
SHADER_OPCODE_DWORD_SCATTERED_WRITE_LOGICAL,
462
463
/**
464
* Memory fence messages.
465
*
466
* Source 0: Must be register g0, used as header.
467
* Source 1: Immediate bool to indicate whether control is returned to the
468
* thread only after the fence has been honored.
469
* Source 2: Immediate byte indicating which memory to fence. Zero means
470
* global memory; GFX7_BTI_SLM means SLM (for Gfx11+ only).
471
*
472
* Vec4 backend only uses Source 0.
473
*/
474
SHADER_OPCODE_MEMORY_FENCE,
475
476
/**
477
* Scheduling-only fence.
478
*
479
* Sources can be used to force a stall until the registers in those are
480
* available. This might generate MOVs or SYNC_NOPs (Gfx12+).
481
*/
482
FS_OPCODE_SCHEDULING_FENCE,
483
484
SHADER_OPCODE_GFX4_SCRATCH_READ,
485
SHADER_OPCODE_GFX4_SCRATCH_WRITE,
486
SHADER_OPCODE_GFX7_SCRATCH_READ,
487
488
SHADER_OPCODE_SCRATCH_HEADER,
489
490
/**
491
* Gfx8+ SIMD8 URB Read messages.
492
*/
493
SHADER_OPCODE_URB_READ_SIMD8,
494
SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT,
495
496
SHADER_OPCODE_URB_WRITE_SIMD8,
497
SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT,
498
SHADER_OPCODE_URB_WRITE_SIMD8_MASKED,
499
SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT,
500
501
/**
502
* Return the index of an arbitrary live channel (i.e. one of the channels
503
* enabled in the current execution mask) and assign it to the first
504
* component of the destination. Expected to be used as input for the
505
* BROADCAST pseudo-opcode.
506
*/
507
SHADER_OPCODE_FIND_LIVE_CHANNEL,
508
509
/**
510
* Return the current execution mask in the specified flag subregister.
511
* Can be CSE'ed more easily than a plain MOV from the ce0 ARF register.
512
*/
513
FS_OPCODE_LOAD_LIVE_CHANNELS,
514
515
/**
516
* Pick the channel from its first source register given by the index
517
* specified as second source. Useful for variable indexing of surfaces.
518
*
519
* Note that because the result of this instruction is by definition
520
* uniform and it can always be splatted to multiple channels using a
521
* scalar regioning mode, only the first channel of the destination region
522
* is guaranteed to be updated, which implies that BROADCAST instructions
523
* should usually be marked force_writemask_all.
524
*/
525
SHADER_OPCODE_BROADCAST,
526
527
/* Pick the channel from its first source register given by the index
528
* specified as second source.
529
*
530
* This is similar to the BROADCAST instruction except that it takes a
531
* dynamic index and potentially puts a different value in each output
532
* channel.
533
*/
534
SHADER_OPCODE_SHUFFLE,
535
536
/* Select between src0 and src1 based on channel enables.
537
*
538
* This instruction copies src0 into the enabled channels of the
539
* destination and copies src1 into the disabled channels.
540
*/
541
SHADER_OPCODE_SEL_EXEC,
542
543
/* This turns into an align16 mov from src0 to dst with a swizzle
544
* provided as an immediate in src1.
545
*/
546
SHADER_OPCODE_QUAD_SWIZZLE,
547
548
/* Take every Nth element in src0 and broadcast it to the group of N
549
* channels in which it lives in the destination. The offset within the
550
* cluster is given by src1 and the cluster size is given by src2.
551
*/
552
SHADER_OPCODE_CLUSTER_BROADCAST,
553
554
SHADER_OPCODE_GET_BUFFER_SIZE,
555
556
SHADER_OPCODE_INTERLOCK,
557
558
/** Target for a HALT
559
*
560
* All HALT instructions in a shader must target the same jump point and
561
* that point is denoted by a HALT_TARGET instruction.
562
*/
563
SHADER_OPCODE_HALT_TARGET,
564
565
VEC4_OPCODE_MOV_BYTES,
566
VEC4_OPCODE_PACK_BYTES,
567
VEC4_OPCODE_UNPACK_UNIFORM,
568
VEC4_OPCODE_DOUBLE_TO_F32,
569
VEC4_OPCODE_DOUBLE_TO_D32,
570
VEC4_OPCODE_DOUBLE_TO_U32,
571
VEC4_OPCODE_TO_DOUBLE,
572
VEC4_OPCODE_PICK_LOW_32BIT,
573
VEC4_OPCODE_PICK_HIGH_32BIT,
574
VEC4_OPCODE_SET_LOW_32BIT,
575
VEC4_OPCODE_SET_HIGH_32BIT,
576
VEC4_OPCODE_MOV_FOR_SCRATCH,
577
VEC4_OPCODE_ZERO_OOB_PUSH_REGS,
578
579
FS_OPCODE_DDX_COARSE,
580
FS_OPCODE_DDX_FINE,
581
/**
582
* Compute dFdy(), dFdyCoarse(), or dFdyFine().
583
*/
584
FS_OPCODE_DDY_COARSE,
585
FS_OPCODE_DDY_FINE,
586
FS_OPCODE_LINTERP,
587
FS_OPCODE_PIXEL_X,
588
FS_OPCODE_PIXEL_Y,
589
FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
590
FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GFX7,
591
FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GFX4,
592
FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL,
593
FS_OPCODE_SET_SAMPLE_ID,
594
FS_OPCODE_PACK_HALF_2x16_SPLIT,
595
FS_OPCODE_INTERPOLATE_AT_SAMPLE,
596
FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
597
FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET,
598
599
VS_OPCODE_URB_WRITE,
600
VS_OPCODE_PULL_CONSTANT_LOAD,
601
VS_OPCODE_PULL_CONSTANT_LOAD_GFX7,
602
603
VS_OPCODE_UNPACK_FLAGS_SIMD4X2,
604
605
/**
606
* Write geometry shader output data to the URB.
607
*
608
* Unlike VS_OPCODE_URB_WRITE, this opcode doesn't do an implied move from
609
* R0 to the first MRF. This allows the geometry shader to override the
610
* "Slot {0,1} Offset" fields in the message header.
611
*/
612
GS_OPCODE_URB_WRITE,
613
614
/**
615
* Write geometry shader output data to the URB and request a new URB
616
* handle (gfx6).
617
*
618
* This opcode doesn't do an implied move from R0 to the first MRF.
619
*/
620
GS_OPCODE_URB_WRITE_ALLOCATE,
621
622
/**
623
* Terminate the geometry shader thread by doing an empty URB write.
624
*
625
* This opcode doesn't do an implied move from R0 to the first MRF. This
626
* allows the geometry shader to override the "GS Number of Output Vertices
627
* for Slot {0,1}" fields in the message header.
628
*/
629
GS_OPCODE_THREAD_END,
630
631
/**
632
* Set the "Slot {0,1} Offset" fields of a URB_WRITE message header.
633
*
634
* - dst is the MRF containing the message header.
635
*
636
* - src0.x indicates which portion of the URB should be written to (e.g. a
637
* vertex number)
638
*
639
* - src1 is an immediate multiplier which will be applied to src0
640
* (e.g. the size of a single vertex in the URB).
641
*
642
* Note: the hardware will apply this offset *in addition to* the offset in
643
* vec4_instruction::offset.
644
*/
645
GS_OPCODE_SET_WRITE_OFFSET,
646
647
/**
648
* Set the "GS Number of Output Vertices for Slot {0,1}" fields of a
649
* URB_WRITE message header.
650
*
651
* - dst is the MRF containing the message header.
652
*
653
* - src0.x is the vertex count. The upper 16 bits will be ignored.
654
*/
655
GS_OPCODE_SET_VERTEX_COUNT,
656
657
/**
658
* Set DWORD 2 of dst to the value in src.
659
*/
660
GS_OPCODE_SET_DWORD_2,
661
662
/**
663
* Prepare the dst register for storage in the "Channel Mask" fields of a
664
* URB_WRITE message header.
665
*
666
* DWORD 4 of dst is shifted left by 4 bits, so that later,
667
* GS_OPCODE_SET_CHANNEL_MASKS can OR DWORDs 0 and 4 together to form the
668
* final channel mask.
669
*
670
* Note: since GS_OPCODE_SET_CHANNEL_MASKS ORs DWORDs 0 and 4 together to
671
* form the final channel mask, DWORDs 0 and 4 of the dst register must not
672
* have any extraneous bits set prior to execution of this opcode (that is,
673
* they should be in the range 0x0 to 0xf).
674
*/
675
GS_OPCODE_PREPARE_CHANNEL_MASKS,
676
677
/**
678
* Set the "Channel Mask" fields of a URB_WRITE message header.
679
*
680
* - dst is the MRF containing the message header.
681
*
682
* - src.x is the channel mask, as prepared by
683
* GS_OPCODE_PREPARE_CHANNEL_MASKS. DWORDs 0 and 4 are OR'ed together to
684
* form the final channel mask.
685
*/
686
GS_OPCODE_SET_CHANNEL_MASKS,
687
688
/**
689
* Get the "Instance ID" fields from the payload.
690
*
691
* - dst is the GRF for gl_InvocationID.
692
*/
693
GS_OPCODE_GET_INSTANCE_ID,
694
695
/**
696
* Send a FF_SYNC message to allocate initial URB handles (gfx6).
697
*
698
* - dst will be used as the writeback register for the FF_SYNC operation.
699
*
700
* - src0 is the number of primitives written.
701
*
702
* - src1 is the value to hold in M0.0: number of SO vertices to write
703
* and number of SO primitives needed. Its value will be overwritten
704
* with the SVBI values if transform feedback is enabled.
705
*
706
* Note: This opcode uses an implicit MRF register for the ff_sync message
707
* header, so the caller is expected to set inst->base_mrf and initialize
708
* that MRF register to r0. This opcode will also write to this MRF register
709
* to include the allocated URB handle so it can then be reused directly as
710
* the header in the URB write operation we are allocating the handle for.
711
*/
712
GS_OPCODE_FF_SYNC,
713
714
/**
715
* Move r0.1 (which holds PrimitiveID information in gfx6) to a separate
716
* register.
717
*
718
* - dst is the GRF where PrimitiveID information will be moved.
719
*/
720
GS_OPCODE_SET_PRIMITIVE_ID,
721
722
/**
723
* Write transform feedback data to the SVB by sending a SVB WRITE message.
724
* Used in gfx6.
725
*
726
* - dst is the MRF register containing the message header.
727
*
728
* - src0 is the register where the vertex data is going to be copied from.
729
*
730
* - src1 is the destination register when write commit occurs.
731
*/
732
GS_OPCODE_SVB_WRITE,
733
734
/**
735
* Set destination index in the SVB write message payload (M0.5). Used
736
* in gfx6 for transform feedback.
737
*
738
* - dst is the header to save the destination indices for SVB WRITE.
739
* - src is the register that holds the destination indices value.
740
*/
741
GS_OPCODE_SVB_SET_DST_INDEX,
742
743
/**
744
* Prepare Mx.0 subregister for being used in the FF_SYNC message header.
745
* Used in gfx6 for transform feedback.
746
*
747
* - dst will hold the register with the final Mx.0 value.
748
*
749
* - src0 has the number of vertices emitted in SO (NumSOVertsToWrite)
750
*
751
* - src1 has the number of needed primitives for SO (NumSOPrimsNeeded)
752
*
753
* - src2 is the value to hold in M0: number of SO vertices to write
754
* and number of SO primitives needed.
755
*/
756
GS_OPCODE_FF_SYNC_SET_PRIMITIVES,
757
758
/**
759
* Terminate the compute shader.
760
*/
761
CS_OPCODE_CS_TERMINATE,
762
763
/**
764
* GLSL barrier()
765
*/
766
SHADER_OPCODE_BARRIER,
767
768
/**
769
* Calculate the high 32-bits of a 32x32 multiply.
770
*/
771
SHADER_OPCODE_MULH,
772
773
/** Signed subtraction with saturation. */
774
SHADER_OPCODE_ISUB_SAT,
775
776
/** Unsigned subtraction with saturation. */
777
SHADER_OPCODE_USUB_SAT,
778
779
/**
780
* A MOV that uses VxH indirect addressing.
781
*
782
* Source 0: A register to start from (HW_REG).
783
* Source 1: An indirect offset (in bytes, UD GRF).
784
* Source 2: The length of the region that could be accessed (in bytes,
785
* UD immediate).
786
*/
787
SHADER_OPCODE_MOV_INDIRECT,
788
789
/** Fills out a relocatable immediate */
790
SHADER_OPCODE_MOV_RELOC_IMM,
791
792
VEC4_OPCODE_URB_READ,
793
TCS_OPCODE_GET_INSTANCE_ID,
794
TCS_OPCODE_URB_WRITE,
795
TCS_OPCODE_SET_INPUT_URB_OFFSETS,
796
TCS_OPCODE_SET_OUTPUT_URB_OFFSETS,
797
TCS_OPCODE_GET_PRIMITIVE_ID,
798
TCS_OPCODE_CREATE_BARRIER_HEADER,
799
TCS_OPCODE_SRC0_010_IS_ZERO,
800
TCS_OPCODE_RELEASE_INPUT,
801
TCS_OPCODE_THREAD_END,
802
803
TES_OPCODE_GET_PRIMITIVE_ID,
804
TES_OPCODE_CREATE_INPUT_READ_HEADER,
805
TES_OPCODE_ADD_INDIRECT_URB_OFFSET,
806
807
SHADER_OPCODE_GET_DSS_ID,
808
SHADER_OPCODE_BTD_SPAWN_LOGICAL,
809
SHADER_OPCODE_BTD_RETIRE_LOGICAL,
810
811
RT_OPCODE_TRACE_RAY_LOGICAL,
812
};
813
814
enum brw_urb_write_flags {
815
BRW_URB_WRITE_NO_FLAGS = 0,
816
817
/**
818
* Causes a new URB entry to be allocated, and its address stored in the
819
* destination register (gen < 7).
820
*/
821
BRW_URB_WRITE_ALLOCATE = 0x1,
822
823
/**
824
* Causes the current URB entry to be deallocated (gen < 7).
825
*/
826
BRW_URB_WRITE_UNUSED = 0x2,
827
828
/**
829
* Causes the thread to terminate.
830
*/
831
BRW_URB_WRITE_EOT = 0x4,
832
833
/**
834
* Indicates that the given URB entry is complete, and may be sent further
835
* down the 3D pipeline (gen < 7).
836
*/
837
BRW_URB_WRITE_COMPLETE = 0x8,
838
839
/**
840
* Indicates that an additional offset (which may be different for the two
841
* vec4 slots) is stored in the message header (gen == 7).
842
*/
843
BRW_URB_WRITE_PER_SLOT_OFFSET = 0x10,
844
845
/**
846
* Indicates that the channel masks in the URB_WRITE message header should
847
* not be overridden to 0xff (gen == 7).
848
*/
849
BRW_URB_WRITE_USE_CHANNEL_MASKS = 0x20,
850
851
/**
852
* Indicates that the data should be sent to the URB using the
853
* URB_WRITE_OWORD message rather than URB_WRITE_HWORD (gen == 7). This
854
* causes offsets to be interpreted as multiples of an OWORD instead of an
855
* HWORD, and only allows one OWORD to be written.
856
*/
857
BRW_URB_WRITE_OWORD = 0x40,
858
859
/**
860
* Convenient combination of flags: end the thread while simultaneously
861
* marking the given URB entry as complete.
862
*/
863
BRW_URB_WRITE_EOT_COMPLETE = BRW_URB_WRITE_EOT | BRW_URB_WRITE_COMPLETE,
864
865
/**
866
* Convenient combination of flags: mark the given URB entry as complete
867
* and simultaneously allocate a new one.
868
*/
869
BRW_URB_WRITE_ALLOCATE_COMPLETE =
870
BRW_URB_WRITE_ALLOCATE | BRW_URB_WRITE_COMPLETE,
871
};
872
873
enum fb_write_logical_srcs {
874
FB_WRITE_LOGICAL_SRC_COLOR0, /* REQUIRED */
875
FB_WRITE_LOGICAL_SRC_COLOR1, /* for dual source blend messages */
876
FB_WRITE_LOGICAL_SRC_SRC0_ALPHA,
877
FB_WRITE_LOGICAL_SRC_SRC_DEPTH, /* gl_FragDepth */
878
FB_WRITE_LOGICAL_SRC_DST_DEPTH, /* GFX4-5: passthrough from thread */
879
FB_WRITE_LOGICAL_SRC_SRC_STENCIL, /* gl_FragStencilRefARB */
880
FB_WRITE_LOGICAL_SRC_OMASK, /* Sample Mask (gl_SampleMask) */
881
FB_WRITE_LOGICAL_SRC_COMPONENTS, /* REQUIRED */
882
FB_WRITE_LOGICAL_NUM_SRCS
883
};
884
885
enum tex_logical_srcs {
886
/** Texture coordinates */
887
TEX_LOGICAL_SRC_COORDINATE,
888
/** Shadow comparator */
889
TEX_LOGICAL_SRC_SHADOW_C,
890
/** dPdx if the operation takes explicit derivatives, otherwise LOD value */
891
TEX_LOGICAL_SRC_LOD,
892
/** dPdy if the operation takes explicit derivatives */
893
TEX_LOGICAL_SRC_LOD2,
894
/** Min LOD */
895
TEX_LOGICAL_SRC_MIN_LOD,
896
/** Sample index */
897
TEX_LOGICAL_SRC_SAMPLE_INDEX,
898
/** MCS data */
899
TEX_LOGICAL_SRC_MCS,
900
/** REQUIRED: Texture surface index */
901
TEX_LOGICAL_SRC_SURFACE,
902
/** Texture sampler index */
903
TEX_LOGICAL_SRC_SAMPLER,
904
/** Texture surface bindless handle */
905
TEX_LOGICAL_SRC_SURFACE_HANDLE,
906
/** Texture sampler bindless handle */
907
TEX_LOGICAL_SRC_SAMPLER_HANDLE,
908
/** Texel offset for gathers */
909
TEX_LOGICAL_SRC_TG4_OFFSET,
910
/** REQUIRED: Number of coordinate components (as UD immediate) */
911
TEX_LOGICAL_SRC_COORD_COMPONENTS,
912
/** REQUIRED: Number of derivative components (as UD immediate) */
913
TEX_LOGICAL_SRC_GRAD_COMPONENTS,
914
915
TEX_LOGICAL_NUM_SRCS,
916
};
917
918
enum surface_logical_srcs {
919
/** Surface binding table index */
920
SURFACE_LOGICAL_SRC_SURFACE,
921
/** Surface bindless handle */
922
SURFACE_LOGICAL_SRC_SURFACE_HANDLE,
923
/** Surface address; could be multi-dimensional for typed opcodes */
924
SURFACE_LOGICAL_SRC_ADDRESS,
925
/** Data to be written or used in an atomic op */
926
SURFACE_LOGICAL_SRC_DATA,
927
/** Surface number of dimensions. Affects the size of ADDRESS */
928
SURFACE_LOGICAL_SRC_IMM_DIMS,
929
/** Per-opcode immediate argument. For atomics, this is the atomic opcode */
930
SURFACE_LOGICAL_SRC_IMM_ARG,
931
/**
932
* Some instructions with side-effects should not be predicated on
933
* sample mask, e.g. lowered stores to scratch.
934
*/
935
SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK,
936
937
SURFACE_LOGICAL_NUM_SRCS
938
};
939
940
#ifdef __cplusplus
941
/**
942
* Allow brw_urb_write_flags enums to be ORed together.
943
*/
944
inline brw_urb_write_flags
945
operator|(brw_urb_write_flags x, brw_urb_write_flags y)
946
{
947
return static_cast<brw_urb_write_flags>(static_cast<int>(x) |
948
static_cast<int>(y));
949
}
950
#endif
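/* Illustrative sketch (editor's example, not from the original header): the
* C++ operator| above keeps combined flags typed, matching the predefined
* combinations in the enum, e.g.:
*
*    brw_urb_write_flags flags =
*       BRW_URB_WRITE_ALLOCATE | BRW_URB_WRITE_COMPLETE;
*    assert(flags == BRW_URB_WRITE_ALLOCATE_COMPLETE);
*/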
951
952
enum PACKED brw_predicate {
953
BRW_PREDICATE_NONE = 0,
954
BRW_PREDICATE_NORMAL = 1,
955
BRW_PREDICATE_ALIGN1_ANYV = 2,
956
BRW_PREDICATE_ALIGN1_ALLV = 3,
957
BRW_PREDICATE_ALIGN1_ANY2H = 4,
958
BRW_PREDICATE_ALIGN1_ALL2H = 5,
959
BRW_PREDICATE_ALIGN1_ANY4H = 6,
960
BRW_PREDICATE_ALIGN1_ALL4H = 7,
961
BRW_PREDICATE_ALIGN1_ANY8H = 8,
962
BRW_PREDICATE_ALIGN1_ALL8H = 9,
963
BRW_PREDICATE_ALIGN1_ANY16H = 10,
964
BRW_PREDICATE_ALIGN1_ALL16H = 11,
965
BRW_PREDICATE_ALIGN1_ANY32H = 12,
966
BRW_PREDICATE_ALIGN1_ALL32H = 13,
967
BRW_PREDICATE_ALIGN16_REPLICATE_X = 2,
968
BRW_PREDICATE_ALIGN16_REPLICATE_Y = 3,
969
BRW_PREDICATE_ALIGN16_REPLICATE_Z = 4,
970
BRW_PREDICATE_ALIGN16_REPLICATE_W = 5,
971
BRW_PREDICATE_ALIGN16_ANY4H = 6,
972
BRW_PREDICATE_ALIGN16_ALL4H = 7,
973
};
974
975
enum PACKED brw_reg_file {
976
BRW_ARCHITECTURE_REGISTER_FILE = 0,
977
BRW_GENERAL_REGISTER_FILE = 1,
978
BRW_MESSAGE_REGISTER_FILE = 2,
979
BRW_IMMEDIATE_VALUE = 3,
980
981
ARF = BRW_ARCHITECTURE_REGISTER_FILE,
982
FIXED_GRF = BRW_GENERAL_REGISTER_FILE,
983
MRF = BRW_MESSAGE_REGISTER_FILE,
984
IMM = BRW_IMMEDIATE_VALUE,
985
986
/* These are not hardware values */
987
VGRF,
988
ATTR,
989
UNIFORM, /* prog_data->params[reg] */
990
BAD_FILE,
991
};
992
993
enum PACKED gfx10_align1_3src_reg_file {
994
BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE = 0,
995
BRW_ALIGN1_3SRC_IMMEDIATE_VALUE = 1, /* src0, src2 */
996
BRW_ALIGN1_3SRC_ACCUMULATOR = 1, /* dest, src1 */
997
};
998
999
/* CNL adds Align1 support for 3-src instructions. Bit 35 of the instruction
1000
* word is "Execution Datatype" which controls whether the instruction operates
1001
* on float or integer types. The register arguments have fields that offer
1002
* finer control over their respective types.
1003
*/
1004
enum PACKED gfx10_align1_3src_exec_type {
1005
BRW_ALIGN1_3SRC_EXEC_TYPE_INT = 0,
1006
BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT = 1,
1007
};
1008
1009
#define BRW_ARF_NULL 0x00
1010
#define BRW_ARF_ADDRESS 0x10
1011
#define BRW_ARF_ACCUMULATOR 0x20
1012
#define BRW_ARF_FLAG 0x30
1013
#define BRW_ARF_MASK 0x40
1014
#define BRW_ARF_MASK_STACK 0x50
1015
#define BRW_ARF_MASK_STACK_DEPTH 0x60
1016
#define BRW_ARF_STATE 0x70
1017
#define BRW_ARF_CONTROL 0x80
1018
#define BRW_ARF_NOTIFICATION_COUNT 0x90
1019
#define BRW_ARF_IP 0xA0
1020
#define BRW_ARF_TDR 0xB0
1021
#define BRW_ARF_TIMESTAMP 0xC0
1022
1023
#define BRW_MRF_COMPR4 (1 << 7)
1024
1025
#define BRW_AMASK 0
1026
#define BRW_IMASK 1
1027
#define BRW_LMASK 2
1028
#define BRW_CMASK 3
1029
1030
1031
1032
#define BRW_THREAD_NORMAL 0
1033
#define BRW_THREAD_ATOMIC 1
1034
#define BRW_THREAD_SWITCH 2
1035
1036
enum PACKED brw_vertical_stride {
1037
BRW_VERTICAL_STRIDE_0 = 0,
1038
BRW_VERTICAL_STRIDE_1 = 1,
1039
BRW_VERTICAL_STRIDE_2 = 2,
1040
BRW_VERTICAL_STRIDE_4 = 3,
1041
BRW_VERTICAL_STRIDE_8 = 4,
1042
BRW_VERTICAL_STRIDE_16 = 5,
1043
BRW_VERTICAL_STRIDE_32 = 6,
1044
BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL = 0xF,
1045
};
1046
1047
enum PACKED gfx10_align1_3src_vertical_stride {
1048
BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0 = 0,
1049
BRW_ALIGN1_3SRC_VERTICAL_STRIDE_1 = 1,
1050
BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2 = 1,
1051
BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4 = 2,
1052
BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8 = 3,
1053
};
1054
1055
enum PACKED brw_width {
1056
BRW_WIDTH_1 = 0,
1057
BRW_WIDTH_2 = 1,
1058
BRW_WIDTH_4 = 2,
1059
BRW_WIDTH_8 = 3,
1060
BRW_WIDTH_16 = 4,
1061
};
1062
1063
/**
1064
* Gfx12+ SWSB SBID synchronization mode.
1065
*
1066
* This is represented as a bitmask including any required SBID token
1067
* synchronization modes, used to synchronize out-of-order instructions. Only
1068
* the strongest mode of the mask will be provided to the hardware in the SWSB
1069
* field of an actual hardware instruction, but virtual instructions may be
1070
* able to take into account multiple of them.
1071
*/
1072
enum tgl_sbid_mode {
1073
TGL_SBID_NULL = 0,
1074
TGL_SBID_SRC = 1,
1075
TGL_SBID_DST = 2,
1076
TGL_SBID_SET = 4
1077
};
1078
1079
#ifdef __cplusplus
1080
/**
1081
* Allow bitwise arithmetic of tgl_sbid_mode enums.
1082
*/
1083
inline tgl_sbid_mode
1084
operator|(tgl_sbid_mode x, tgl_sbid_mode y)
1085
{
1086
return tgl_sbid_mode(unsigned(x) | unsigned(y));
1087
}
1088
1089
inline tgl_sbid_mode
1090
operator&(tgl_sbid_mode x, tgl_sbid_mode y)
1091
{
1092
return tgl_sbid_mode(unsigned(x) & unsigned(y));
1093
}
1094
1095
inline tgl_sbid_mode &
1096
operator|=(tgl_sbid_mode &x, tgl_sbid_mode y)
1097
{
1098
return x = x | y;
1099
}
1100
1101
#endif
1102
1103
/**
1104
* TGL+ SWSB RegDist synchronization pipeline.
1105
*
1106
* On TGL all instructions that use the RegDist synchronization mechanism are
1107
* considered to be executed as a single in-order pipeline, therefore only the
1108
* TGL_PIPE_FLOAT pipeline is applicable. On XeHP+ platforms there are two
1109
* additional asynchronous ALU pipelines (which still execute instructions
1110
* in-order and use the RegDist synchronization mechanism). TGL_PIPE_NONE
1111
* doesn't provide any RegDist pipeline synchronization information and allows
1112
* the hardware to infer the pipeline based on the source types of the
1113
* instruction. TGL_PIPE_ALL can be used when synchronization with all ALU
1114
* pipelines is intended.
1115
*/
1116
enum tgl_pipe {
1117
TGL_PIPE_NONE = 0,
1118
TGL_PIPE_FLOAT,
1119
TGL_PIPE_INT,
1120
TGL_PIPE_LONG,
1121
TGL_PIPE_ALL
1122
};
1123
1124
/**
1125
* Logical representation of the SWSB scheduling information of a hardware
1126
* instruction. The binary representation is slightly more compact.
1127
*/
1128
struct tgl_swsb {
1129
unsigned regdist : 3;
1130
enum tgl_pipe pipe : 3;
1131
unsigned sbid : 4;
1132
enum tgl_sbid_mode mode : 3;
1133
};
1134
1135
/**
1136
* Construct a scheduling annotation with a single RegDist dependency. This
1137
* synchronizes with the completion of the d-th previous in-order instruction.
1138
* The index is one-based; zero causes a no-op tgl_swsb to be constructed.
1139
*/
1140
static inline struct tgl_swsb
1141
tgl_swsb_regdist(unsigned d)
1142
{
1143
const struct tgl_swsb swsb = { d, d ? TGL_PIPE_ALL : TGL_PIPE_NONE };
1144
assert(swsb.regdist == d);
1145
return swsb;
1146
}
1147
1148
/**
1149
* Construct a scheduling annotation that synchronizes with the specified SBID
1150
* token.
1151
*/
1152
static inline struct tgl_swsb
1153
tgl_swsb_sbid(enum tgl_sbid_mode mode, unsigned sbid)
1154
{
1155
const struct tgl_swsb swsb = { 0, TGL_PIPE_NONE, sbid, mode };
1156
assert(swsb.sbid == sbid);
1157
return swsb;
1158
}
1159
1160
/**
1161
* Construct a no-op scheduling annotation.
1162
*/
1163
static inline struct tgl_swsb
1164
tgl_swsb_null(void)
1165
{
1166
return tgl_swsb_regdist(0);
1167
}
1168
1169
/**
1170
* Return a scheduling annotation that allocates the same SBID synchronization
1171
* token as \p swsb. In addition it will synchronize against a previous
1172
* in-order instruction if \p regdist is non-zero.
1173
*/
1174
static inline struct tgl_swsb
1175
tgl_swsb_dst_dep(struct tgl_swsb swsb, unsigned regdist)
1176
{
1177
swsb.regdist = regdist;
1178
swsb.mode = swsb.mode & TGL_SBID_SET;
1179
swsb.pipe = (regdist ? TGL_PIPE_ALL : TGL_PIPE_NONE);
1180
return swsb;
1181
}
1182
1183
/**
1184
* Return a scheduling annotation that synchronizes against the same SBID and
1185
* RegDist dependencies as \p swsb, but doesn't allocate any SBID token.
1186
*/
1187
static inline struct tgl_swsb
1188
tgl_swsb_src_dep(struct tgl_swsb swsb)
1189
{
1190
swsb.mode = swsb.mode & (TGL_SBID_SRC | TGL_SBID_DST);
1191
return swsb;
1192
}
1193
1194
/**
1195
* Convert the provided tgl_swsb to the hardware's binary representation of an
1196
* SWSB annotation.
1197
*/
1198
static inline uint8_t
1199
tgl_swsb_encode(const struct intel_device_info *devinfo, struct tgl_swsb swsb)
1200
{
1201
if (!swsb.mode) {
1202
const unsigned pipe = devinfo->verx10 < 125 ? 0 :
1203
swsb.pipe == TGL_PIPE_FLOAT ? 0x10 :
1204
swsb.pipe == TGL_PIPE_INT ? 0x18 :
1205
swsb.pipe == TGL_PIPE_LONG ? 0x50 :
1206
swsb.pipe == TGL_PIPE_ALL ? 0x8 : 0;
1207
return pipe | swsb.regdist;
1208
} else if (swsb.regdist) {
1209
return 0x80 | swsb.regdist << 4 | swsb.sbid;
1210
} else {
1211
return swsb.sbid | (swsb.mode & TGL_SBID_SET ? 0x40 :
1212
swsb.mode & TGL_SBID_DST ? 0x20 : 0x30);
1213
}
1214
}
1215
1216
/**
1217
* Convert the provided binary representation of an SWSB annotation to a
1218
* tgl_swsb.
1219
*/
1220
static inline struct tgl_swsb
1221
tgl_swsb_decode(const struct intel_device_info *devinfo, const enum opcode opcode,
1222
const uint8_t x)
1223
{
1224
if (x & 0x80) {
1225
const struct tgl_swsb swsb = { (x & 0x70u) >> 4, TGL_PIPE_NONE,
1226
x & 0xfu,
1227
(opcode == BRW_OPCODE_SEND ||
1228
opcode == BRW_OPCODE_SENDC ||
1229
opcode == BRW_OPCODE_MATH) ?
1230
TGL_SBID_SET : TGL_SBID_DST };
1231
return swsb;
1232
} else if ((x & 0x70) == 0x20) {
1233
return tgl_swsb_sbid(TGL_SBID_DST, x & 0xfu);
1234
} else if ((x & 0x70) == 0x30) {
1235
return tgl_swsb_sbid(TGL_SBID_SRC, x & 0xfu);
1236
} else if ((x & 0x70) == 0x40) {
1237
return tgl_swsb_sbid(TGL_SBID_SET, x & 0xfu);
1238
} else {
1239
const struct tgl_swsb swsb = { x & 0x7u,
1240
((x & 0x78) == 0x10 ? TGL_PIPE_FLOAT :
1241
(x & 0x78) == 0x18 ? TGL_PIPE_INT :
1242
(x & 0x78) == 0x50 ? TGL_PIPE_LONG :
1243
(x & 0x78) == 0x8 ? TGL_PIPE_ALL :
1244
TGL_PIPE_NONE) };
1245
assert(devinfo->verx10 >= 125 || swsb.pipe == TGL_PIPE_NONE);
1246
return swsb;
1247
}
1248
}
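/* Illustrative sketch (editor's example, not from the original header): a
* round trip through the helpers above, with devinfo standing for a
* const struct intel_device_info * assumed to be in scope (SBID tokens are a
* Gfx12+ concept). An SBID "set" annotation on token 5 encodes to 0x45 and
* decodes back to the same logical value for a non-SEND opcode:
*
*    struct tgl_swsb swsb = tgl_swsb_sbid(TGL_SBID_SET, 5);
*    uint8_t raw = tgl_swsb_encode(devinfo, swsb);          // 0x40 | 5 == 0x45
*    struct tgl_swsb back = tgl_swsb_decode(devinfo, BRW_OPCODE_MOV, raw);
*    assert(back.mode == TGL_SBID_SET && back.sbid == 5 && back.regdist == 0);
*/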
1249
1250
enum tgl_sync_function {
1251
TGL_SYNC_NOP = 0x0,
1252
TGL_SYNC_ALLRD = 0x2,
1253
TGL_SYNC_ALLWR = 0x3,
1254
TGL_SYNC_BAR = 0xe,
1255
TGL_SYNC_HOST = 0xf
1256
};
1257
1258
/**
1259
* Message target: Shared Function ID for where to SEND a message.
1260
*
1261
* These are enumerated in the ISA reference under "send - Send Message".
1262
* In particular, see the following tables:
1263
* - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
1264
* - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
1265
* - Ivybridge PRM, Volume 1 Part 1, section 3.2.7 "GPE Function IDs"
1266
*/
1267
enum brw_message_target {
1268
BRW_SFID_NULL = 0,
1269
BRW_SFID_MATH = 1, /* Only valid on Gfx4-5 */
1270
BRW_SFID_SAMPLER = 2,
1271
BRW_SFID_MESSAGE_GATEWAY = 3,
1272
BRW_SFID_DATAPORT_READ = 4,
1273
BRW_SFID_DATAPORT_WRITE = 5,
1274
BRW_SFID_URB = 6,
1275
BRW_SFID_THREAD_SPAWNER = 7,
1276
BRW_SFID_VME = 8,
1277
1278
GFX6_SFID_DATAPORT_SAMPLER_CACHE = 4,
1279
GFX6_SFID_DATAPORT_RENDER_CACHE = 5,
1280
GFX6_SFID_DATAPORT_CONSTANT_CACHE = 9,
1281
1282
GFX7_SFID_DATAPORT_DATA_CACHE = 10,
1283
GFX7_SFID_PIXEL_INTERPOLATOR = 11,
1284
HSW_SFID_DATAPORT_DATA_CACHE_1 = 12,
1285
HSW_SFID_CRE = 13,
1286
1287
GFX12_SFID_TGM = 13, /* Typed Global Memory */
1288
GFX12_SFID_SLM = 14, /* Shared Local Memory */
1289
GFX12_SFID_UGM = 15, /* Untyped Global Memory */
1290
1291
GEN_RT_SFID_BINDLESS_THREAD_DISPATCH = 7,
1292
GEN_RT_SFID_RAY_TRACE_ACCELERATOR = 8,
1293
};
1294
1295
#define GFX7_MESSAGE_TARGET_DP_DATA_CACHE 10
1296
1297
#define BRW_SAMPLER_RETURN_FORMAT_FLOAT32 0
1298
#define BRW_SAMPLER_RETURN_FORMAT_UINT32 2
1299
#define BRW_SAMPLER_RETURN_FORMAT_SINT32 3
1300
1301
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE 0
1302
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE 0
1303
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS 0
1304
#define BRW_SAMPLER_MESSAGE_SIMD8_KILLPIX 1
1305
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD 1
1306
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD 1
1307
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS 2
1308
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS 2
1309
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE 0
1310
#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE 2
1311
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE 0
1312
#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE 1
1313
#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE 1
1314
#define BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO 2
1315
#define BRW_SAMPLER_MESSAGE_SIMD16_RESINFO 2
1316
#define BRW_SAMPLER_MESSAGE_SIMD4X2_LD 3
1317
#define BRW_SAMPLER_MESSAGE_SIMD8_LD 3
1318
#define BRW_SAMPLER_MESSAGE_SIMD16_LD 3
1319
1320
#define GFX5_SAMPLER_MESSAGE_SAMPLE 0
1321
#define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS 1
1322
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD 2
1323
#define GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE 3
1324
#define GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS 4
1325
#define GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
1326
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE 6
1327
#define GFX5_SAMPLER_MESSAGE_SAMPLE_LD 7
1328
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4 8
1329
#define GFX5_SAMPLER_MESSAGE_LOD 9
1330
#define GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO 10
1331
#define GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO 11
1332
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C 16
1333
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO 17
1334
#define GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C 18
1335
#define HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE 20
1336
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LZ 24
1337
#define GFX9_SAMPLER_MESSAGE_SAMPLE_C_LZ 25
1338
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LD_LZ 26
1339
#define GFX9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W 28
1340
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD_MCS 29
1341
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DMS 30
1342
#define GFX7_SAMPLER_MESSAGE_SAMPLE_LD2DSS 31
1343
1344
/* for GFX5 only */
1345
#define BRW_SAMPLER_SIMD_MODE_SIMD4X2 0
1346
#define BRW_SAMPLER_SIMD_MODE_SIMD8 1
1347
#define BRW_SAMPLER_SIMD_MODE_SIMD16 2
1348
#define BRW_SAMPLER_SIMD_MODE_SIMD32_64 3
1349
1350
/* GFX9 changes SIMD mode 0 to mean SIMD8D, but lets us get the SIMD4x2
1351
* behavior by setting bit 22 of dword 2 in the message header. */
1352
#define GFX9_SAMPLER_SIMD_MODE_SIMD8D 0
1353
#define GFX9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2 (1 << 22)
1354
1355
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW 0
1356
#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH 1
1357
#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS 2
1358
#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS 3
1359
#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS 4
1360
#define GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS 5
1361
#define BRW_DATAPORT_OWORD_BLOCK_OWORDS(n) \
1362
((n) == 1 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
1363
(n) == 2 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS : \
1364
(n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS : \
1365
(n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : \
1366
(n) == 16 ? GFX12_DATAPORT_OWORD_BLOCK_16_OWORDS : \
1367
(abort(), ~0))
1368
#define BRW_DATAPORT_OWORD_BLOCK_DWORDS(n) \
1369
((n) == 4 ? BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW : \
1370
(n) == 8 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS : \
1371
(n) == 16 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS : \
1372
(n) == 32 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : \
1373
(abort(), ~0))
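/* Editor's note (illustrative, not from the original header): the two macros
* above agree with each other since four dwords make one oword, e.g.
* BRW_DATAPORT_OWORD_BLOCK_DWORDS(16) == BRW_DATAPORT_OWORD_BLOCK_OWORDS(4).
*/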
1374
1375
#define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0
1376
#define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2
1377
1378
#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2
1379
#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3
1380
1381
/* This one stays the same across generations. */
1382
#define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0
1383
/* GFX4 */
1384
#define BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 1
1385
#define BRW_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 2
1386
#define BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 3
1387
/* G45, GFX5 */
1388
#define G45_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
1389
#define G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
1390
#define G45_DATAPORT_READ_MESSAGE_AVC_LOOP_FILTER_READ 3
1391
#define G45_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
1392
#define G45_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
1393
/* GFX6 */
1394
#define GFX6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ 1
1395
#define GFX6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 2
1396
#define GFX6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ 4
1397
#define GFX6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ 5
1398
#define GFX6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 6
1399
1400
#define BRW_DATAPORT_READ_TARGET_DATA_CACHE 0
1401
#define BRW_DATAPORT_READ_TARGET_RENDER_CACHE 1
1402
#define BRW_DATAPORT_READ_TARGET_SAMPLER_CACHE 2
1403
1404
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0
1405
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
1406
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2
1407
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3
1408
#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4
1409
1410
#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 0
1411
#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 1
1412
#define BRW_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 2
1413
#define BRW_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 3
1414
#define BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 4
1415
#define BRW_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE 5
1416
#define BRW_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE 7
1417
1418
/* GFX6 */
1419
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE 7
1420
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 8
1421
#define GFX6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 9
1422
#define GFX6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE 10
1423
#define GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 11
1424
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 12
1425
#define GFX6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE 13
1426
#define GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE 14
1427
1428
/* GFX7 */
1429
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_READ 4
1430
#define GFX7_DATAPORT_RC_TYPED_SURFACE_READ 5
1431
#define GFX7_DATAPORT_RC_TYPED_ATOMIC_OP 6
1432
#define GFX7_DATAPORT_RC_MEMORY_FENCE 7
1433
#define GFX7_DATAPORT_RC_MEDIA_BLOCK_WRITE 10
1434
#define GFX7_DATAPORT_RC_RENDER_TARGET_WRITE 12
1435
#define GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE 13
1436
#define GFX7_DATAPORT_DC_OWORD_BLOCK_READ 0
1437
#define GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ 1
1438
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_READ 2
1439
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_READ 3
1440
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_READ 4
1441
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ 5
1442
#define GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP 6
1443
#define GFX7_DATAPORT_DC_MEMORY_FENCE 7
1444
#define GFX7_DATAPORT_DC_OWORD_BLOCK_WRITE 8
1445
#define GFX7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE 10
1446
#define GFX7_DATAPORT_DC_DWORD_SCATTERED_WRITE 11
1447
#define GFX7_DATAPORT_DC_BYTE_SCATTERED_WRITE 12
1448
#define GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE 13
1449
1450
#define GFX7_DATAPORT_SCRATCH_READ ((1 << 18) | \
1451
(0 << 17))
1452
#define GFX7_DATAPORT_SCRATCH_WRITE ((1 << 18) | \
1453
(1 << 17))
1454
#define GFX7_DATAPORT_SCRATCH_NUM_REGS_SHIFT 12
1455
1456
#define GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET 0
1457
#define GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE 1
1458
#define GFX7_PIXEL_INTERPOLATOR_LOC_CENTROID 2
1459
#define GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET 3
1460
1461
/* HSW */
1462
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_READ 0
1463
#define HSW_DATAPORT_DC_PORT0_UNALIGNED_OWORD_BLOCK_READ 1
1464
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_READ 2
1465
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_READ 3
1466
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ 4
1467
#define HSW_DATAPORT_DC_PORT0_MEMORY_FENCE 7
1468
#define HSW_DATAPORT_DC_PORT0_OWORD_BLOCK_WRITE 8
1469
#define HSW_DATAPORT_DC_PORT0_OWORD_DUAL_BLOCK_WRITE 10
1470
#define HSW_DATAPORT_DC_PORT0_DWORD_SCATTERED_WRITE 11
1471
#define HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE 12
1472
1473
#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ 1
1474
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP 2
1475
#define HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2 3
1476
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_READ 4
1477
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ 5
1478
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP 6
1479
#define HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2 7
1480
#define HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE 9
1481
#define HSW_DATAPORT_DC_PORT1_MEDIA_BLOCK_WRITE 10
1482
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP 11
1483
#define HSW_DATAPORT_DC_PORT1_ATOMIC_COUNTER_OP_SIMD4X2 12
1484
#define HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE 13
1485
#define GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ 0x10
1486
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ 0x11
1487
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP 0x12
1488
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP 0x13
1489
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ 0x14
1490
#define GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE 0x15
1491
#define GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE 0x19
1492
#define GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE 0x1a
1493
#define GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP 0x1b
1494
#define GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP 0x1d
1495
#define GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP 0x1e
1496
1497
/* GFX9 */
1498
#define GFX9_DATAPORT_RC_RENDER_TARGET_WRITE 12
1499
#define GFX9_DATAPORT_RC_RENDER_TARGET_READ 13
1500
1501
/* A64 scattered message subtype */
1502
#define GFX8_A64_SCATTERED_SUBTYPE_BYTE 0
1503
#define GFX8_A64_SCATTERED_SUBTYPE_DWORD 1
1504
#define GFX8_A64_SCATTERED_SUBTYPE_QWORD 2
1505
#define GFX8_A64_SCATTERED_SUBTYPE_HWORD 3
1506
1507
/* Dataport special binding table indices: */
1508
#define BRW_BTI_STATELESS 255
1509
#define GFX7_BTI_SLM 254
1510
1511
#define HSW_BTI_STATELESS_LOCALLY_COHERENT 255
1512
#define HSW_BTI_STATELESS_NON_COHERENT 253
1513
#define HSW_BTI_STATELESS_GLOBALLY_COHERENT 252
1514
#define HSW_BTI_STATELESS_LLC_COHERENT 251
1515
#define HSW_BTI_STATELESS_L3_UNCACHED 250
1516
1517
/* The hardware docs are a bit contradictory here. On Haswell, where they
1518
* first added cacheability control, there were 5 different cache modes (see
1519
* HSW_BTI_STATELESS_* above). On Broadwell, they reduced to two:
1520
*
1521
* - IA-Coherent (BTI=255): Coherent within Gen and coherent within the
1522
* entire IA cache memory hierarchy.
1523
*
1524
* - Non-Coherent (BTI=253): Coherent within Gen, same cache type.
1525
*
1526
* Information about stateless cache coherency can be found in the "A32
1527
* Stateless" section of the "3D Media GPGPU" volume of the PRM for each
1528
* hardware generation.
1529
*
1530
* Unfortunately, the docs for MDC_STATELESS appear to have been copied and
1531
* pasted from Haswell and give the Haswell definitions for the BTI values of
1532
* 255 and 253 including a warning about accessing 253 surfaces from multiple
1533
* threads. This seems to be a copy+paste error and the definitions from the
1534
* "A32 Stateless" section should be trusted instead.
1535
*
1536
* Note that because the DRM sets bit 4 of HDC_CHICKEN0 on BDW, CHV and at
1537
* least some pre-production steppings of SKL due to WaForceEnableNonCoherent,
1538
* HDC memory access may have been overridden by the kernel to be non-coherent
1539
* (matching the behavior of the same BTI on pre-Gfx8 hardware) and BTI 255
1540
* may actually be an alias for BTI 253.
1541
*/
1542
#define GFX8_BTI_STATELESS_IA_COHERENT 255
1543
#define GFX8_BTI_STATELESS_NON_COHERENT 253
1544
#define GFX9_BTI_BINDLESS 252
1545
1546
/* Dataport atomic operations for Untyped Atomic Integer Operation message
1547
* (and others).
1548
*/
1549
#define BRW_AOP_AND 1
1550
#define BRW_AOP_OR 2
1551
#define BRW_AOP_XOR 3
1552
#define BRW_AOP_MOV 4
1553
#define BRW_AOP_INC 5
1554
#define BRW_AOP_DEC 6
1555
#define BRW_AOP_ADD 7
1556
#define BRW_AOP_SUB 8
1557
#define BRW_AOP_REVSUB 9
1558
#define BRW_AOP_IMAX 10
1559
#define BRW_AOP_IMIN 11
1560
#define BRW_AOP_UMAX 12
1561
#define BRW_AOP_UMIN 13
1562
#define BRW_AOP_CMPWR 14
1563
#define BRW_AOP_PREDEC 15
1564
1565
/* Dataport atomic operations for Untyped Atomic Float Operation message. */
1566
#define BRW_AOP_FMAX 1
1567
#define BRW_AOP_FMIN 2
1568
#define BRW_AOP_FCMPWR 3
1569
1570
#define BRW_MATH_FUNCTION_INV 1
1571
#define BRW_MATH_FUNCTION_LOG 2
1572
#define BRW_MATH_FUNCTION_EXP 3
1573
#define BRW_MATH_FUNCTION_SQRT 4
1574
#define BRW_MATH_FUNCTION_RSQ 5
1575
#define BRW_MATH_FUNCTION_SIN 6
1576
#define BRW_MATH_FUNCTION_COS 7
1577
#define BRW_MATH_FUNCTION_SINCOS 8 /* gfx4, gfx5 */
1578
#define BRW_MATH_FUNCTION_FDIV 9 /* gfx6+ */
1579
#define BRW_MATH_FUNCTION_POW 10
1580
#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11
1581
#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT 12
1582
#define BRW_MATH_FUNCTION_INT_DIV_REMAINDER 13
1583
#define GFX8_MATH_FUNCTION_INVM 14
1584
#define GFX8_MATH_FUNCTION_RSQRTM 15
1585
1586
#define BRW_MATH_INTEGER_UNSIGNED 0
1587
#define BRW_MATH_INTEGER_SIGNED 1
1588
1589
#define BRW_MATH_PRECISION_FULL 0
1590
#define BRW_MATH_PRECISION_PARTIAL 1
1591
1592
#define BRW_MATH_SATURATE_NONE 0
1593
#define BRW_MATH_SATURATE_SATURATE 1
1594
1595
#define BRW_MATH_DATA_VECTOR 0
1596
#define BRW_MATH_DATA_SCALAR 1
1597
1598
#define BRW_URB_OPCODE_WRITE_HWORD 0
1599
#define BRW_URB_OPCODE_WRITE_OWORD 1
1600
#define BRW_URB_OPCODE_READ_HWORD 2
1601
#define BRW_URB_OPCODE_READ_OWORD 3
1602
#define GFX7_URB_OPCODE_ATOMIC_MOV 4
1603
#define GFX7_URB_OPCODE_ATOMIC_INC 5
1604
#define GFX8_URB_OPCODE_ATOMIC_ADD 6
1605
#define GFX8_URB_OPCODE_SIMD8_WRITE 7
1606
#define GFX8_URB_OPCODE_SIMD8_READ 8
1607
1608
#define BRW_URB_SWIZZLE_NONE 0
1609
#define BRW_URB_SWIZZLE_INTERLEAVE 1
1610
#define BRW_URB_SWIZZLE_TRANSPOSE 2
1611
1612
#define BRW_SCRATCH_SPACE_SIZE_1K 0
#define BRW_SCRATCH_SPACE_SIZE_2K 1
#define BRW_SCRATCH_SPACE_SIZE_4K 2
#define BRW_SCRATCH_SPACE_SIZE_8K 3
#define BRW_SCRATCH_SPACE_SIZE_16K 4
#define BRW_SCRATCH_SPACE_SIZE_32K 5
#define BRW_SCRATCH_SPACE_SIZE_64K 6
#define BRW_SCRATCH_SPACE_SIZE_128K 7
#define BRW_SCRATCH_SPACE_SIZE_256K 8
#define BRW_SCRATCH_SPACE_SIZE_512K 9
#define BRW_SCRATCH_SPACE_SIZE_1M 10
#define BRW_SCRATCH_SPACE_SIZE_2M 11

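/* Illustrative sketch (editor addition, not part of the upstream header):
 * the encodings above are simply log2(size in KB), so a scratch size in
 * bytes maps to its encoding like this. The helper name is hypothetical and
 * assumes size_in_bytes is a power of two between 1 KB and 2 MB.
 */
static inline unsigned
brw_scratch_size_encoding_sketch(unsigned size_in_bytes)
{
   unsigned encoding = 0;
   while ((1024u << encoding) < size_in_bytes)
      encoding++;
   /* e.g. 4096 -> BRW_SCRATCH_SPACE_SIZE_4K (2), 2*1024*1024 -> ..._2M (11) */
   return encoding;
}
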
#define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY 0
#define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY 1
#define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG 2
#define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP 3
#define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG 4
#define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
#define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6


/* Gfx7 "GS URB Entry Allocation Size" is a U9-1 field, so the maximum gs_size
 * is 2^9, or 512. It's counted in multiples of 64 bytes.
 *
 * Identical for VS, DS, and HS.
 */
#define GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_DS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_HS_URB_ENTRY_SIZE_BYTES (512*64)
#define GFX7_MAX_VS_URB_ENTRY_SIZE_BYTES (512*64)

#define BRW_GS_EDGE_INDICATOR_0 (1 << 8)
#define BRW_GS_EDGE_INDICATOR_1 (1 << 9)

/* Gfx6 "GS URB Entry Allocation Size" is defined as a number of 1024-bit
 * (128 bytes) URB rows and the maximum allowed value is 5 rows.
 */
#define GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES (5*128)

/* GS Thread Payload
 */

/* 3DSTATE_GS "Output Vertex Size" has an effective maximum of 62. It's
 * counted in multiples of 16 bytes.
 */
#define GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES (62*16)


/* R0 */
# define GFX7_GS_PAYLOAD_INSTANCE_ID_SHIFT 27

/* CR0.0[5:4] Floating-Point Rounding Modes
 * Skylake PRM, Volume 7 Part 1, "Control Register", page 756
 */

#define BRW_CR0_RND_MODE_MASK 0x30
#define BRW_CR0_RND_MODE_SHIFT 4

enum PACKED brw_rnd_mode {
   BRW_RND_MODE_RTNE = 0, /* Round to Nearest or Even */
   BRW_RND_MODE_RU = 1, /* Round Up, toward +inf */
   BRW_RND_MODE_RD = 2, /* Round Down, toward -inf */
   BRW_RND_MODE_RTZ = 3, /* Round Toward Zero */
   BRW_RND_MODE_UNSPECIFIED, /* Unspecified rounding mode */
};

#define BRW_CR0_FP64_DENORM_PRESERVE (1 << 6)
#define BRW_CR0_FP32_DENORM_PRESERVE (1 << 7)
#define BRW_CR0_FP16_DENORM_PRESERVE (1 << 10)

#define BRW_CR0_FP_MODE_MASK (BRW_CR0_FP64_DENORM_PRESERVE | \
                              BRW_CR0_FP32_DENORM_PRESERVE | \
                              BRW_CR0_FP16_DENORM_PRESERVE | \
                              BRW_CR0_RND_MODE_MASK)

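/* Illustrative sketch (editor addition, not part of the upstream header):
 * a read-modify-write of the CR0 rounding-mode field using the mask/shift
 * defined above. The helper name is hypothetical; in practice CR0 updates
 * are emitted as EU instructions rather than computed on the CPU.
 */
static inline unsigned
brw_cr0_with_rnd_mode_sketch(unsigned cr0, enum brw_rnd_mode mode)
{
   /* Clear bits [5:4] and insert the new mode. BRW_RND_MODE_UNSPECIFIED is
    * not a programmable value and is masked away here.
    */
   return (cr0 & ~(unsigned)BRW_CR0_RND_MODE_MASK) |
          (((unsigned)mode << BRW_CR0_RND_MODE_SHIFT) & BRW_CR0_RND_MODE_MASK);
}
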
/* MDC_DS - Data Size Message Descriptor Control Field
 * Skylake PRM, Volume 2d, page 129
 *
 * Specifies the number of bytes to be read or written per dword, as used by
 * the byte_scattered read/write and byte_scaled read/write messages.
 */
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE 0
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD 1
#define GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD 2

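/* Illustrative sketch (editor addition, not part of the upstream header):
 * mapping an access size in bits to the MDC_DS encodings above, as a
 * byte-scattered message builder might. The helper name is hypothetical.
 */
static inline unsigned
gfx7_byte_scattered_data_element_sketch(unsigned bit_size)
{
   switch (bit_size) {
   case 8:  return GFX7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
   case 16: return GFX7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
   default: return GFX7_BYTE_SCATTERED_DATA_ELEMENT_DWORD; /* 32-bit */
   }
}
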
#define GEN_RT_BTD_MESSAGE_SPAWN 1

#define GEN_RT_TRACE_RAY_INITAL 0
#define GEN_RT_TRACE_RAY_INSTANCE 1
#define GEN_RT_TRACE_RAY_COMMIT 2
#define GEN_RT_TRACE_RAY_CONTINUE 3

#define GEN_RT_BTD_SHADER_TYPE_ANY_HIT 0
#define GEN_RT_BTD_SHADER_TYPE_CLOSEST_HIT 1
#define GEN_RT_BTD_SHADER_TYPE_MISS 2
#define GEN_RT_BTD_SHADER_TYPE_INTERSECTION 3

/* Starting with Xe-HPG, the old dataport was massively reworked. The new
 * unit, called the Load/Store Cache or LSC, has a significantly improved
 * interface. Instead of bespoke messages for every case, there are basically
 * one or two messages with different bits to control things like address
 * size, how much data is read/written, etc. It's much nicer but also means
 * all of the dataport encoding/decoding code has to be rewritten. The enums
 * below describe the new interface.
 */
enum lsc_opcode {
   LSC_OP_LOAD = 0,
   LSC_OP_LOAD_CMASK = 2,
   LSC_OP_STORE = 4,
   LSC_OP_STORE_CMASK = 6,
   LSC_OP_ATOMIC_INC = 8,
   LSC_OP_ATOMIC_DEC = 9,
   LSC_OP_ATOMIC_LOAD = 10,
   LSC_OP_ATOMIC_STORE = 11,
   LSC_OP_ATOMIC_ADD = 12,
   LSC_OP_ATOMIC_SUB = 13,
   LSC_OP_ATOMIC_MIN = 14,
   LSC_OP_ATOMIC_MAX = 15,
   LSC_OP_ATOMIC_UMIN = 16,
   LSC_OP_ATOMIC_UMAX = 17,
   LSC_OP_ATOMIC_CMPXCHG = 18,
   LSC_OP_ATOMIC_FADD = 19,
   LSC_OP_ATOMIC_FSUB = 20,
   LSC_OP_ATOMIC_FMIN = 21,
   LSC_OP_ATOMIC_FMAX = 22,
   LSC_OP_ATOMIC_FCMPXCHG = 23,
   LSC_OP_ATOMIC_AND = 24,
   LSC_OP_ATOMIC_OR = 25,
   LSC_OP_ATOMIC_XOR = 26,
   LSC_OP_FENCE = 31
};

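/* Illustrative sketch (editor addition, not part of the upstream header):
 * the atomic opcodes above occupy the contiguous range
 * [LSC_OP_ATOMIC_INC, LSC_OP_ATOMIC_XOR], so a simple range check
 * classifies them. The helper name is hypothetical.
 */
static inline int
lsc_opcode_is_atomic_sketch(enum lsc_opcode op)
{
   return op >= LSC_OP_ATOMIC_INC && op <= LSC_OP_ATOMIC_XOR;
}
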
/*
 * Specifies the size of the dataport address payload in registers.
 */
enum PACKED lsc_addr_reg_size {
   LSC_ADDR_REG_SIZE_1 = 1,
   LSC_ADDR_REG_SIZE_2 = 2,
   LSC_ADDR_REG_SIZE_3 = 3,
   LSC_ADDR_REG_SIZE_4 = 4,
   LSC_ADDR_REG_SIZE_6 = 6,
   LSC_ADDR_REG_SIZE_8 = 8,
};

/*
 * Specifies the size of the address payload item in a dataport message.
 */
enum PACKED lsc_addr_size {
   LSC_ADDR_SIZE_A16 = 1, /* 16-bit address offset */
   LSC_ADDR_SIZE_A32 = 2, /* 32-bit address offset */
   LSC_ADDR_SIZE_A64 = 3, /* 64-bit address offset */
};

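/* Illustrative sketch (editor addition, not part of the upstream header):
 * choosing an LSC address size from the width of the address offset in
 * bits. The helper name is hypothetical.
 */
static inline enum lsc_addr_size
lsc_addr_size_for_bits_sketch(unsigned bits)
{
   switch (bits) {
   case 16: return LSC_ADDR_SIZE_A16;
   case 32: return LSC_ADDR_SIZE_A32;
   default: return LSC_ADDR_SIZE_A64; /* 64-bit */
   }
}
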
/*
 * Specifies the type of the address payload item in a dataport message. The
 * address type specifies how the dataport message decodes the Extended
 * Descriptor for the surface attributes and address calculation.
 */
enum PACKED lsc_addr_surface_type {
   LSC_ADDR_SURFTYPE_FLAT = 0, /* Flat */
   LSC_ADDR_SURFTYPE_BSS = 1, /* Bindless surface state */
   LSC_ADDR_SURFTYPE_SS = 2, /* Surface state */
   LSC_ADDR_SURFTYPE_BTI = 3, /* Binding table index */
};

/*
 * Specifies the dataport message override to the default L1 and L3 memory
 * cache policies. Dataport L1 cache policies are uncached (UC), cached (C),
 * cache streaming (S) and invalidate-after-read (IAR). Dataport L3 cache
 * policies are uncached (UC) and cached (C).
 */
enum lsc_cache_load {
   /* No override. Use the non-pipelined state or surface state cache settings
    * for L1 and L3.
    */
   LSC_CACHE_LOAD_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_LOAD_L1UC_L3UC = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_LOAD_L1UC_L3C = 2,
   /* Override to L1 cached and L3 uncached */
   LSC_CACHE_LOAD_L1C_L3UC = 3,
   /* Override to cache at both L1 and L3 */
   LSC_CACHE_LOAD_L1C_L3C = 4,
   /* Override to L1 streaming load and L3 uncached */
   LSC_CACHE_LOAD_L1S_L3UC = 5,
   /* Override to L1 streaming load and L3 cached */
   LSC_CACHE_LOAD_L1S_L3C = 6,
   /* For load messages, override to L1 invalidate-after-read, and L3 cached. */
   LSC_CACHE_LOAD_L1IAR_L3C = 7,
};

/*
 * Specifies the dataport message override to the default L1 and L3 memory
 * cache policies. Dataport L1 cache policies are uncached (UC), write-through
 * (WT), write-back (WB) and streaming (S). Dataport L3 cache policies are
 * uncached (UC) and cached (WB).
 */
enum PACKED lsc_cache_store {
   /* No override. Use the non-pipelined or surface state cache settings for L1
    * and L3.
    */
   LSC_CACHE_STORE_L1STATE_L3MOCS = 0,
   /* Override to L1 uncached and L3 uncached */
   LSC_CACHE_STORE_L1UC_L3UC = 1,
   /* Override to L1 uncached and L3 cached */
   LSC_CACHE_STORE_L1UC_L3WB = 2,
   /* Override to L1 write-through and L3 uncached */
   LSC_CACHE_STORE_L1WT_L3UC = 3,
   /* Override to L1 write-through and L3 cached */
   LSC_CACHE_STORE_L1WT_L3WB = 4,
   /* Override to L1 streaming and L3 uncached */
   LSC_CACHE_STORE_L1S_L3UC = 5,
   /* Override to L1 streaming and L3 cached */
   LSC_CACHE_STORE_L1S_L3WB = 6,
   /* Override to L1 write-back, and L3 cached */
   LSC_CACHE_STORE_L1WB_L3WB = 7,
};

/*
 * Specifies which components of the data payload 4-element vector (X,Y,Z,W)
 * are packed into the register payload.
 */
enum PACKED lsc_cmask {
   LSC_CMASK_X = 0x1,
   LSC_CMASK_Y = 0x2,
   LSC_CMASK_XY = 0x3,
   LSC_CMASK_Z = 0x4,
   LSC_CMASK_XZ = 0x5,
   LSC_CMASK_YZ = 0x6,
   LSC_CMASK_XYZ = 0x7,
   LSC_CMASK_W = 0x8,
   LSC_CMASK_XW = 0x9,
   LSC_CMASK_YW = 0xa,
   LSC_CMASK_XYW = 0xb,
   LSC_CMASK_ZW = 0xc,
   LSC_CMASK_XZW = 0xd,
   LSC_CMASK_YZW = 0xe,
   LSC_CMASK_XYZW = 0xf,
};

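/* Illustrative sketch (editor addition, not part of the upstream header):
 * each LSC_CMASK_* bit selects one component (bit 0 = X ... bit 3 = W), so
 * the mask covering the first num_components components is a simple shift.
 * The helper name is hypothetical; num_components is assumed to be 1..4.
 */
static inline enum lsc_cmask
lsc_cmask_for_components_sketch(unsigned num_components)
{
   /* num_components = 3 -> 0x7 == LSC_CMASK_XYZ */
   return (enum lsc_cmask)((1u << num_components) - 1);
}
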
/*
 * Specifies the size of the data payload item in a dataport message.
 */
enum PACKED lsc_data_size {
   /* 8-bit scalar data value in memory, packed into an 8-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D8 = 0,
   /* 16-bit scalar data value in memory, packed into a 16-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D16 = 1,
   /* 32-bit scalar data value in memory, packed into a 32-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D32 = 2,
   /* 64-bit scalar data value in memory, packed into a 64-bit data value in
    * register.
    */
   LSC_DATA_SIZE_D64 = 3,
   /* 8-bit scalar data value in memory, packed into a 32-bit unsigned data
    * value in register.
    */
   LSC_DATA_SIZE_D8U32 = 4,
   /* 16-bit scalar data value in memory, packed into a 32-bit unsigned data
    * value in register.
    */
   LSC_DATA_SIZE_D16U32 = 5,
   /* 16-bit scalar bfloat16 data value in memory, converted to a 32-bit float
    * value in register.
    */
   LSC_DATA_SIZE_D16BF32 = 6,
};

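/* Illustrative sketch (editor addition, not part of the upstream header):
 * picking a data size from the access width in bits, optionally using the
 * 32-bit-extended forms for sub-dword data. The helper name and the
 * extend_to_32 parameter are hypothetical.
 */
static inline enum lsc_data_size
lsc_data_size_for_bits_sketch(unsigned bits, int extend_to_32)
{
   switch (bits) {
   case 8:  return extend_to_32 ? LSC_DATA_SIZE_D8U32 : LSC_DATA_SIZE_D8;
   case 16: return extend_to_32 ? LSC_DATA_SIZE_D16U32 : LSC_DATA_SIZE_D16;
   case 32: return LSC_DATA_SIZE_D32;
   default: return LSC_DATA_SIZE_D64; /* 64-bit */
   }
}
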
/*
 * Specifies the scope of the fence.
 */
enum PACKED lsc_fence_scope {
   /* Wait until all previous memory transactions from this thread are observed
    * within the local thread-group.
    */
   LSC_FENCE_THREADGROUP = 0,
   /* Wait until all previous memory transactions from this thread are observed
    * within the local sub-slice.
    */
   LSC_FENCE_LOCAL = 1,
   /* Wait until all previous memory transactions from this thread are observed
    * in the local tile.
    */
   LSC_FENCE_TILE = 2,
   /* Wait until all previous memory transactions from this thread are observed
    * in the local GPU.
    */
   LSC_FENCE_GPU = 3,
   /* Wait until all previous memory transactions from this thread are observed
    * across all GPUs in the system.
    */
   LSC_FENCE_ALL_GPU = 4,
   /* Wait until all previous memory transactions from this thread are observed
    * at the "system" level.
    */
   LSC_FENCE_SYSTEM_RELEASE = 5,
   /* For GPUs that do not follow PCIe write ordering for downstream writes
    * targeting device memory, a fence message with scope=System_Acquire will
    * commit to device memory all downstream and peer writes that have reached
    * the device.
    */
   LSC_FENCE_SYSTEM_ACQUIRE = 6,
};

/*
 * Specifies the type of cache flush operation to perform after a fence is
 * complete.
 */
enum PACKED lsc_flush_type {
   LSC_FLUSH_TYPE_NONE = 0,
   /*
    * For a R/W cache, evict dirty lines (M to I state) and invalidate clean
    * lines. For a RO cache, invalidate clean lines.
    */
   LSC_FLUSH_TYPE_EVICT = 1,
   /*
    * For both R/W and RO caches, invalidate clean lines in the cache.
    */
   LSC_FLUSH_TYPE_INVALIDATE = 2,
   /*
    * For a R/W cache, invalidate dirty lines (M to I state) without
    * write-back to the next level. This opcode does nothing for a RO cache.
    */
   LSC_FLUSH_TYPE_DISCARD = 3,
   /*
    * For a R/W cache, write back dirty lines to the next level but keep them
    * in the cache as "clean" (M to V state). This opcode does nothing for a
    * RO cache.
    */
   LSC_FLUSH_TYPE_CLEAN = 4,
   /*
    * Flush the "RW" section of the L3 cache, but leave the L1 and L2 caches
    * untouched.
    */
   LSC_FLUSH_TYPE_L3ONLY = 5,
};

enum PACKED lsc_backup_fence_routing {
   /* Normal routing: UGM fence is routed to the UGM pipeline. */
   LSC_NORMAL_ROUTING,
   /* Route UGM fence to the LSC unit. */
   LSC_ROUTE_TO_LSC,
};

/*
 * Specifies the size of the vector in a dataport message.
 */
enum PACKED lsc_vect_size {
   LSC_VECT_SIZE_V1 = 0, /* Vector length 1 */
   LSC_VECT_SIZE_V2 = 1, /* Vector length 2 */
   LSC_VECT_SIZE_V3 = 2, /* Vector length 3 */
   LSC_VECT_SIZE_V4 = 3, /* Vector length 4 */
   LSC_VECT_SIZE_V8 = 4, /* Vector length 8 */
   LSC_VECT_SIZE_V16 = 5, /* Vector length 16 */
   LSC_VECT_SIZE_V32 = 6, /* Vector length 32 */
   LSC_VECT_SIZE_V64 = 7, /* Vector length 64 */
};

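/* Illustrative sketch (editor addition, not part of the upstream header):
 * mapping a component count to the vector-size encodings above; only 1-4,
 * 8, 16, 32 and 64 are representable. The helper name is hypothetical.
 */
static inline enum lsc_vect_size
lsc_vect_size_for_components_sketch(unsigned n)
{
   switch (n) {
   case 1:  return LSC_VECT_SIZE_V1;
   case 2:  return LSC_VECT_SIZE_V2;
   case 3:  return LSC_VECT_SIZE_V3;
   case 4:  return LSC_VECT_SIZE_V4;
   case 8:  return LSC_VECT_SIZE_V8;
   case 16: return LSC_VECT_SIZE_V16;
   case 32: return LSC_VECT_SIZE_V32;
   default: return LSC_VECT_SIZE_V64; /* 64 */
   }
}
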
#define LSC_ONE_ADDR_REG 1

#endif /* BRW_EU_DEFINES_H */