GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/genX_state.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "common/intel_aux_map.h"
#include "common/intel_sample_positions.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "vk_util.h"

/**
 * Compute an \p n x \p m pixel hashing table usable as slice, subslice or
 * pixel pipe hashing table.  The resulting table is the cyclic repetition of
 * a fixed pattern with periodicity equal to \p period.
 *
 * If \p index is specified to be equal to \p period, a 2-way hashing table
 * will be generated such that indices 0 and 1 are returned for the following
 * fractions of entries respectively:
 *
 *   p_0 = ceil(period / 2) / period
 *   p_1 = floor(period / 2) / period
 *
 * If \p index is even and less than \p period, a 3-way hashing table will be
 * generated such that indices 0, 1 and 2 are returned for the following
 * fractions of entries:
 *
 *   p_0 = (ceil(period / 2) - 1) / period
 *   p_1 = floor(period / 2) / period
 *   p_2 = 1 / period
 *
 * The equations above apply if \p flip is equal to 0; if it is equal to 1,
 * p_0 and p_1 will be swapped in the result.  Note that in the context of
 * pixel pipe hashing this can always be 0 on Gfx12 platforms, since the
 * hardware transparently remaps logical indices found on the table to
 * physical pixel pipe indices from the highest to lowest EU count.
 */
UNUSED static void
calculate_pixel_hashing_table(unsigned n, unsigned m,
                              unsigned period, unsigned index, bool flip,
                              uint32_t *p)
{
   for (unsigned i = 0; i < n; i++) {
      for (unsigned j = 0; j < m; j++) {
         const unsigned k = (i + j) % period;
         p[j + m * i] = (k == index ? 2 : (k & 1) ^ flip);
      }
   }
}
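
/* For illustration: with n = m = 4, period = 3 and index = 3 (a 2-way
 * table, since index == period) and flip = 0, the loops above yield
 *
 *    0 1 0 0
 *    1 0 0 1
 *    0 0 1 0
 *    0 1 0 0
 *
 * i.e. index 0 for ceil(3/2)/3 = 2/3 of the entries and index 1 for
 * floor(3/2)/3 = 1/3 of them, matching the fractions documented above.
 */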

static void
genX(emit_slice_hashing_state)(struct anv_device *device,
                               struct anv_batch *batch)
{
   device->slice_hash = (struct anv_state) { 0 };

#if GFX_VER == 11
   assert(device->info.ppipe_subslices[2] == 0);

   if (device->info.ppipe_subslices[0] == device->info.ppipe_subslices[1])
      return;

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   device->slice_hash =
      anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);

   const bool flip = device->info.ppipe_subslices[0] <
                     device->info.ppipe_subslices[1];
   struct GENX(SLICE_HASH_TABLE) table;
   calculate_pixel_hashing_table(16, 16, 3, 3, flip, table.Entry[0]);

   GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, &table);

   anv_batch_emit(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = device->slice_hash.offset;
   }

   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
#elif GFX_VERx10 == 120
   /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
    * present with n active dual subslices.
    */
   unsigned ppipes_of[3] = {};

   for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
      for (unsigned p = 0; p < ARRAY_SIZE(device->info.ppipe_subslices); p++)
         ppipes_of[n] += (device->info.ppipe_subslices[p] == n);
   }
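
   /* As an example, a hypothetical fused part with
    * ppipe_subslices = {2, 2, 1} gives ppipes_of = {0, 1, 2}, which is
    * handled by the "ppipes_of[2] == 2 && ppipes_of[1] == 1" case below.
    */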

   /* Gfx12 has three pixel pipes. */
   assert(ppipes_of[0] + ppipes_of[1] + ppipes_of[2] == 3);

   if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
      /* All three pixel pipes have the maximum number of active dual
       * subslices, or there is only one active pixel pipe: Nothing to do.
       */
      return;
   }

   anv_batch_emit(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
      p.SliceHashControl[0] = TABLE_0;

      if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);

      if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
         calculate_pixel_hashing_table(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
      else
         unreachable("Illegal fusing.");
   }

   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), p) {
      p.SubsliceHashingTableEnable = true;
      p.SubsliceHashingTableEnableMask = true;
   }
#endif
}

static VkResult
init_render_queue_state(struct anv_queue *queue)
{
   struct anv_device *device = queue->device;
   struct anv_batch batch;

   uint32_t cmds[64];
   batch.start = batch.next = cmds;
   batch.end = (void *) cmds + sizeof(cmds);
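   /* The whole init batch is built in this small on-stack buffer; the
    * assert(batch.next <= batch.end) before submission checks that the
    * commands emitted below never outgrow it.
    */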

   anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
#if GFX_VER >= 9
      ps.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
      ps.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
#endif
      ps.PipelineSelection = _3D;
   }

#if GFX_VER == 9
   anv_batch_write_reg(&batch, GENX(CACHE_MODE_1), cm1) {
      cm1.FloatBlendOptimizationEnable = true;
      cm1.FloatBlendOptimizationEnableMask = true;
      cm1.MSCRAWHazardAvoidanceBit = true;
      cm1.MSCRAWHazardAvoidanceBitMask = true;
      cm1.PartialResolveDisableInVC = true;
      cm1.PartialResolveDisableInVCMask = true;
   }
#endif

   anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);

   anv_batch_emit(&batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleYMin = 0;
      rect.ClippedDrawingRectangleXMin = 0;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.DrawingRectangleOriginY = 0;
      rect.DrawingRectangleOriginX = 0;
   }

#if GFX_VER >= 8
   anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);

   genX(emit_sample_pattern)(&batch, 0, NULL);

   /* The BDW+ docs describe how to use the 3DSTATE_WM_HZ_OP instruction in
    * the section titled, "Optimized Depth Buffer Clear and/or Stencil Buffer
    * Clear."  It mentions that the packet overrides GPU state for the clear
    * operation and needs to be reset to 0s to clear the overrides.  Depending
    * on the kernel, we may not get a context with the state for this packet
    * zeroed.  Do it ourselves just in case.  We've observed this to prevent
    * a number of GPU hangs on ICL.
    */
   anv_batch_emit(&batch, GENX(3DSTATE_WM_HZ_OP), hzp);
#endif

#if GFX_VER == 11
   /* The default behavior of bit 5 "Headerless Message for Pre-emptable
    * Contexts" in the SAMPLER MODE register is set to 0, which means
    * headerless sampler messages are not allowed for pre-emptable
    * contexts.  Set bit 5 to 1 to allow them.
    */
   anv_batch_write_reg(&batch, GENX(SAMPLER_MODE), sm) {
      sm.HeaderlessMessageforPreemptableContexts = true;
      sm.HeaderlessMessageforPreemptableContextsMask = true;
   }

   /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in the
    * HALF_SLICE_CHICKEN7 register.
    */
   anv_batch_write_reg(&batch, GENX(HALF_SLICE_CHICKEN7), hsc7) {
      hsc7.EnabledTexelOffsetPrecisionFix = true;
      hsc7.EnabledTexelOffsetPrecisionFixMask = true;
   }

   anv_batch_write_reg(&batch, GENX(TCCNTLREG), tcc) {
      tcc.L3DataPartialWriteMergingEnable = true;
      tcc.ColorZPartialWriteMergingEnable = true;
      tcc.URBPartialWriteMergingEnable = true;
      tcc.TCDisable = true;
   }
#endif
   genX(emit_slice_hashing_state)(device, &batch);

#if GFX_VER >= 11
   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (device->info.disable_ccs_repack) {
      anv_batch_write_reg(&batch, GENX(CACHE_MODE_0), cm0) {
         cm0.DisableRepackingforCompression = true;
         cm0.DisableRepackingforCompressionMask = true;
      }
   }

   /* An unknown issue is causing VS push constants to become corrupted
    * during object-level preemption.  For now, restrict to command buffer
    * level preemption to avoid rendering corruption.
    */
   anv_batch_write_reg(&batch, GENX(CS_CHICKEN1), cc1) {
      cc1.ReplayMode = MidcmdbufferPreemption;
      cc1.ReplayModeMask = true;
   }
#endif

#if GFX_VER == 12
   if (device->info.has_aux_map) {
      uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
      assert(aux_base_addr % (32 * 1024) == 0);
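      /* GFX_AUX_TABLE_BASE_ADDR is a 64-bit register; program it with two
       * 32-bit LRI writes, low dword first, then the high dword at the
       * register offset + 4.
       */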
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num);
         lri.DataDWord = aux_base_addr & 0xffffffff;
      }
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num) + 4;
         lri.DataDWord = aux_base_addr >> 32;
      }
   }
#endif

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * This is only safe on kernels with context isolation support.
    */
   if (GFX_VER >= 8 && device->physical->has_context_isolation) {
#if GFX_VER >= 9
      anv_batch_write_reg(&batch, GENX(CS_DEBUG_MODE2), csdm2) {
         csdm2.CONSTANT_BUFFERAddressOffsetDisable = true;
         csdm2.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#elif GFX_VER == 8
      anv_batch_write_reg(&batch, GENX(INSTPM), instpm) {
         instpm.CONSTANT_BUFFERAddressOffsetDisable = true;
         instpm.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#endif
   }

#if GFX_VER >= 11
   /* Starting with GFX version 11, SLM is no longer part of the L3$ config
    * so it never changes throughout the lifetime of the VkDevice.
    */
   const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
   genX(emit_l3_config)(&batch, device, cfg);
   device->l3_config = cfg;
#endif

   anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);

   assert(batch.next <= batch.end);

   return anv_queue_submit_simple_batch(queue, &batch);
}

void
genX(init_physical_device_state)(ASSERTED struct anv_physical_device *device)
{
   assert(device->info.verx10 == GFX_VERx10);
}

VkResult
genX(init_device_state)(struct anv_device *device)
{
   /* Initialize so a device with zero queues returns a defined result. */
   VkResult res = VK_SUCCESS;

   for (uint32_t i = 0; i < device->queue_count; i++) {
      struct anv_queue *queue = &device->queues[i];
      switch (queue->family->engine_class) {
      case I915_ENGINE_CLASS_RENDER:
         res = init_render_queue_state(queue);
         break;
      default:
         res = vk_error(VK_ERROR_INITIALIZATION_FAILED);
         break;
      }
      if (res != VK_SUCCESS)
         return res;
   }

   return res;
}

void
genX(emit_l3_config)(struct anv_batch *batch,
                     const struct anv_device *device,
                     const struct intel_l3_config *cfg)
{
   UNUSED const struct intel_device_info *devinfo = &device->info;

#if GFX_VER >= 8

#if GFX_VER >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   anv_batch_write_reg(batch, L3_ALLOCATION_REG, l3cr) {
      if (cfg == NULL) {
#if GFX_VER >= 12
         l3cr.L3FullWayAllocationEnable = true;
#else
         unreachable("Invalid L3$ config");
#endif
      } else {
#if GFX_VER < 11
         l3cr.SLMEnable = cfg->n[INTEL_L3P_SLM];
#endif
#if GFX_VER == 11
         /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be
          * set in the L3CNTLREG register.  The default setting of the bit
          * is not the desirable behavior.
          */
         l3cr.ErrorDetectionBehaviorControl = true;
         l3cr.UseFullWays = true;
#endif /* GFX_VER == 11 */
         assert(cfg->n[INTEL_L3P_IS] == 0);
         assert(cfg->n[INTEL_L3P_C] == 0);
         assert(cfg->n[INTEL_L3P_T] == 0);
         l3cr.URBAllocation = cfg->n[INTEL_L3P_URB];
         l3cr.ROAllocation = cfg->n[INTEL_L3P_RO];
         l3cr.DCAllocation = cfg->n[INTEL_L3P_DC];
         l3cr.AllAllocation = cfg->n[INTEL_L3P_ALL];
      }
   }

#else /* GFX_VER < 8 */

   const bool has_dc = cfg->n[INTEL_L3P_DC] || cfg->n[INTEL_L3P_ALL];
   const bool has_is = cfg->n[INTEL_L3P_IS] || cfg->n[INTEL_L3P_RO] ||
                       cfg->n[INTEL_L3P_ALL];
   const bool has_c = cfg->n[INTEL_L3P_C] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];
   const bool has_t = cfg->n[INTEL_L3P_T] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];

   assert(!cfg->n[INTEL_L3P_ALL]);

   /* When enabled, SLM only uses a portion of the L3 on half of the banks;
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const bool urb_low_bw = cfg->n[INTEL_L3P_SLM] && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[INTEL_L3P_URB] == cfg->n[INTEL_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[INTEL_L3P_URB] >= n0_urb);

   anv_batch_write_reg(batch, GENX(L3SQCREG1), l3sqc) {
      l3sqc.ConvertDC_UC = !has_dc;
      l3sqc.ConvertIS_UC = !has_is;
      l3sqc.ConvertC_UC = !has_c;
      l3sqc.ConvertT_UC = !has_t;
#if GFX_VERx10 == 75
      l3sqc.L3SQGeneralPriorityCreditInitialization = SQGPCI_DEFAULT;
#else
      l3sqc.L3SQGeneralPriorityCreditInitialization =
         devinfo->is_baytrail ? BYT_SQGPCI_DEFAULT : SQGPCI_DEFAULT;
#endif
      l3sqc.L3SQHighPriorityCreditInitialization = SQHPCI_DEFAULT;
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG2), l3cr2) {
      l3cr2.SLMEnable = cfg->n[INTEL_L3P_SLM];
      l3cr2.URBLowBandwidth = urb_low_bw;
      l3cr2.URBAllocation = cfg->n[INTEL_L3P_URB] - n0_urb;
#if GFX_VERx10 != 75
      l3cr2.ALLAllocation = cfg->n[INTEL_L3P_ALL];
#endif
      l3cr2.ROAllocation = cfg->n[INTEL_L3P_RO];
      l3cr2.DCAllocation = cfg->n[INTEL_L3P_DC];
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG3), l3cr3) {
      l3cr3.ISAllocation = cfg->n[INTEL_L3P_IS];
      l3cr3.ISLowBandwidth = 0;
      l3cr3.CAllocation = cfg->n[INTEL_L3P_C];
      l3cr3.CLowBandwidth = 0;
      l3cr3.TAllocation = cfg->n[INTEL_L3P_T];
      l3cr3.TLowBandwidth = 0;
   }

#if GFX_VERx10 == 75
   if (device->physical->cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      anv_batch_write_reg(batch, GENX(SCRATCH1), s1) {
         s1.L3AtomicDisable = !has_dc;
      }
      anv_batch_write_reg(batch, GENX(CHICKEN3), c3) {
         c3.L3AtomicDisableMask = true;
         c3.L3AtomicDisable = !has_dc;
      }
   }
#endif /* GFX_VERx10 == 75 */

#endif /* GFX_VER < 8 */
}

void
genX(emit_multisample)(struct anv_batch *batch, uint32_t samples,
                       const VkSampleLocationEXT *locations)
{
   anv_batch_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
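      /* NumberofMultisamples holds log2 of the sample count: for a
       * power-of-two count, e.g. samples = 8, __builtin_ffs(8) - 1 = 3.
       */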
      ms.NumberofMultisamples = __builtin_ffs(samples) - 1;

      ms.PixelLocation = CENTER;
#if GFX_VER >= 8
      /* The PRM says that this bit is valid only for DX9:
       *
       *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
       *    should not have any effect by setting or not setting this bit.
       */
      ms.PixelPositionOffsetEnable = false;
#else

      if (locations) {
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X_ARRAY(ms.Sample, locations);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X_ARRAY(ms.Sample, locations);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X_ARRAY(ms.Sample, locations);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X_ARRAY(ms.Sample, locations);
            break;
         default:
            break;
         }
      } else {
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X(ms.Sample);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X(ms.Sample);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X(ms.Sample);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X(ms.Sample);
            break;
         default:
            break;
         }
      }
#endif
   }
}

#if GFX_VER >= 8
void
genX(emit_sample_pattern)(struct anv_batch *batch, uint32_t samples,
                          const VkSampleLocationEXT *locations)
{
   /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
    * VkPhysicalDeviceFeatures::standardSampleLocations.
    */
   anv_batch_emit(batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
      if (locations) {
         /* The Skylake PRM Vol. 2a "3DSTATE_SAMPLE_PATTERN" says:
          *
          *    "When programming the sample offsets (for NUMSAMPLES_4 or _8
          *    and MSRASTMODE_xxx_PATTERN), the order of the samples 0 to 3
          *    (or 7 for 8X, or 15 for 16X) must have monotonically
          *    increasing distance from the pixel center. This is required
          *    to get the correct centroid computation in the device."
          *
          * However, the Vulkan spec seems to require that the samples occur
          * in the order provided through the API.  The standard sample
          * patterns have the above property that they have monotonically
          * increasing distances from the center but client-provided ones do
          * not.  As long as this only affects centroid calculations as the
          * docs say, we should be ok because OpenGL and Vulkan only require
          * that the centroid be some lit sample and that it's the same for
          * all samples in a pixel; they have no requirement that it be the
          * one closest to center.
          */
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X_ARRAY(sp._1xSample, locations);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X_ARRAY(sp._2xSample, locations);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X_ARRAY(sp._4xSample, locations);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X_ARRAY(sp._8xSample, locations);
            break;
#if GFX_VER >= 9
         case 16:
            INTEL_SAMPLE_POS_16X_ARRAY(sp._16xSample, locations);
            break;
#endif
         default:
            break;
         }
      } else {
         INTEL_SAMPLE_POS_1X(sp._1xSample);
         INTEL_SAMPLE_POS_2X(sp._2xSample);
         INTEL_SAMPLE_POS_4X(sp._4xSample);
         INTEL_SAMPLE_POS_8X(sp._8xSample);
#if GFX_VER >= 9
         INTEL_SAMPLE_POS_16X(sp._16xSample);
#endif
      }
   }
}
#endif

#if GFX_VER >= 11
void
genX(emit_shading_rate)(struct anv_batch *batch,
                        const struct anv_graphics_pipeline *pipeline,
                        struct anv_state cps_states,
                        struct anv_dynamic_state *dynamic_state)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
   const bool cps_enable = wm_prog_data &&
      wm_prog_data->per_coarse_pixel_dispatch;

#if GFX_VER == 11
   anv_batch_emit(batch, GENX(3DSTATE_CPS), cps) {
      cps.CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE;
      if (cps_enable) {
         cps.MinCPSizeX = dynamic_state->fragment_shading_rate.width;
         cps.MinCPSizeY = dynamic_state->fragment_shading_rate.height;
      }
   }
#elif GFX_VER == 12
   for (uint32_t i = 0; i < dynamic_state->viewport.count; i++) {
      uint32_t *cps_state_dwords =
         cps_states.map + GENX(CPS_STATE_length) * 4 * i;
      struct GENX(CPS_STATE) cps_state = {
         .CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE,
      };

      if (cps_enable) {
         cps_state.MinCPSizeX = dynamic_state->fragment_shading_rate.width;
         cps_state.MinCPSizeY = dynamic_state->fragment_shading_rate.height;
      }

      GENX(CPS_STATE_pack)(NULL, cps_state_dwords, &cps_state);
   }

   anv_batch_emit(batch, GENX(3DSTATE_CPS_POINTERS), cps) {
      cps.CoarsePixelShadingStateArrayPointer = cps_states.offset;
   }
#endif
}
#endif /* GFX_VER >= 11 */

static uint32_t
vk_to_intel_tex_filter(VkFilter filter, bool anisotropyEnable)
{
   switch (filter) {
   default:
      assert(!"Invalid filter");
   case VK_FILTER_NEAREST:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_NEAREST;
   case VK_FILTER_LINEAR:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_LINEAR;
   }
}

static uint32_t
vk_to_intel_max_anisotropy(float ratio)
{
   return (anv_clamp_f(ratio, 2, 16) - 2) / 2;
}
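
/* For example, VkSamplerCreateInfo::maxAnisotropy = 16.0 maps to
 * (16 - 2) / 2 = 7, the hardware encoding for a 16:1 ratio, and anything
 * at or below 2.0 maps to 0 (2:1).
 */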

static const uint32_t vk_to_intel_mipmap_mode[] = {
   [VK_SAMPLER_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
   [VK_SAMPLER_MIPMAP_MODE_LINEAR]  = MIPFILTER_LINEAR
};

static const uint32_t vk_to_intel_tex_address[] = {
   [VK_SAMPLER_ADDRESS_MODE_REPEAT]               = TCM_WRAP,
   [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT]      = TCM_MIRROR,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE]        = TCM_CLAMP,
   [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER]      = TCM_CLAMP_BORDER,
};

/* Vulkan specifies the result of shadow comparisons as:
 *    1 if   ref <op> texel,
 *    0 otherwise.
 *
 * The hardware does:
 *    0 if texel <op> ref,
 *    1 otherwise.
 *
 * So, these look a bit strange because there's both a negation
 * and swapping of the arguments involved.
 */
static const uint32_t vk_to_intel_shadow_compare_op[] = {
   [VK_COMPARE_OP_NEVER]            = PREFILTEROP_ALWAYS,
   [VK_COMPARE_OP_LESS]             = PREFILTEROP_LEQUAL,
   [VK_COMPARE_OP_EQUAL]            = PREFILTEROP_NOTEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL]    = PREFILTEROP_LESS,
   [VK_COMPARE_OP_GREATER]          = PREFILTEROP_GEQUAL,
   [VK_COMPARE_OP_NOT_EQUAL]        = PREFILTEROP_EQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROP_GREATER,
   [VK_COMPARE_OP_ALWAYS]           = PREFILTEROP_NEVER,
};
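
/* Worked example: VK_COMPARE_OP_LESS wants 1 when "ref < texel".  With
 * PREFILTEROP_LEQUAL the hardware returns 0 when "texel <= ref", i.e. 1
 * when "texel > ref", which is exactly "ref < texel".
 */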

#if GFX_VER >= 9
static const uint32_t vk_to_intel_sampler_reduction_mode[] = {
   [VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT] = STD_FILTER,
   [VK_SAMPLER_REDUCTION_MODE_MIN_EXT]              = MINIMUM,
   [VK_SAMPLER_REDUCTION_MODE_MAX_EXT]              = MAXIMUM,
};
#endif

VkResult genX(CreateSampler)(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSampler*                                  pSampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
                              VK_OBJECT_TYPE_SAMPLER);
   if (!sampler)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   sampler->n_planes = 1;

   uint32_t border_color_stride = GFX_VERx10 == 75 ? 512 : 64;
   uint32_t border_color_offset;
   ASSERTED bool has_custom_color = false;
   if (pCreateInfo->borderColor <= VK_BORDER_COLOR_INT_OPAQUE_WHITE) {
      border_color_offset = device->border_colors.offset +
                            pCreateInfo->borderColor *
                            border_color_stride;
   } else {
      assert(GFX_VER >= 8);
      sampler->custom_border_color =
         anv_state_reserved_pool_alloc(&device->custom_border_colors);
      border_color_offset = sampler->custom_border_color.offset;
   }

#if GFX_VER >= 9
   unsigned sampler_reduction_mode = STD_FILTER;
   bool enable_sampler_reduction = false;
#endif

   vk_foreach_struct(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
         VkSamplerYcbcrConversionInfo *pSamplerConversion =
            (VkSamplerYcbcrConversionInfo *) ext;
         ANV_FROM_HANDLE(anv_ycbcr_conversion, conversion,
                         pSamplerConversion->conversion);

         /* Ignore conversion for non-YUV formats.  This fulfills a
          * requirement for clients that want to utilize the same code path
          * for images with external formats (VK_FORMAT_UNDEFINED) and
          * "regular" RGBA images where the format is known.
          */
         if (conversion == NULL || !conversion->format->can_ycbcr)
            break;

         sampler->n_planes = conversion->format->n_planes;
         sampler->conversion = conversion;
         break;
      }
#if GFX_VER >= 9
      case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO: {
         VkSamplerReductionModeCreateInfo *sampler_reduction =
            (VkSamplerReductionModeCreateInfo *) ext;
         sampler_reduction_mode =
            vk_to_intel_sampler_reduction_mode[sampler_reduction->reductionMode];
         enable_sampler_reduction = true;
         break;
      }
#endif
      case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: {
         VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
            (VkSamplerCustomBorderColorCreateInfoEXT *) ext;
         if (sampler->custom_border_color.map == NULL)
            break;
         struct gfx8_border_color *cbc = sampler->custom_border_color.map;
         if (custom_border_color->format == VK_FORMAT_B4G4R4A4_UNORM_PACK16) {
            /* B4G4R4A4_UNORM_PACK16 is treated as R4G4B4A4_UNORM_PACK16 with
             * a swizzle, but this does not carry over to the sampler for
             * border colors, so we need to do the swizzle ourselves here.
             */
            cbc->uint32[0] = custom_border_color->customBorderColor.uint32[2];
            cbc->uint32[1] = custom_border_color->customBorderColor.uint32[1];
            cbc->uint32[2] = custom_border_color->customBorderColor.uint32[0];
            cbc->uint32[3] = custom_border_color->customBorderColor.uint32[3];
         } else {
            /* Both structs share the same layout, so just copy them over. */
            memcpy(cbc, &custom_border_color->customBorderColor,
                   sizeof(VkClearColorValue));
         }
         has_custom_color = true;
         break;
      }
      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

   assert((sampler->custom_border_color.map == NULL) || has_custom_color);

   if (device->physical->has_bindless_samplers) {
      /* If we have bindless, allocate enough samplers.  We allocate 32 bytes
       * for each sampler instead of 16 bytes because we want all bindless
       * samplers to be 32-byte aligned so we don't have to use indirect
       * sampler messages on them.
       */
      sampler->bindless_state =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              sampler->n_planes * 32, 32);
   }

   for (unsigned p = 0; p < sampler->n_planes; p++) {
      const bool plane_has_chroma =
         sampler->conversion && sampler->conversion->format->planes[p].has_chroma;
      const VkFilter min_filter =
         plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->minFilter;
      const VkFilter mag_filter =
         plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->magFilter;
      const bool enable_min_filter_addr_rounding = min_filter != VK_FILTER_NEAREST;
      const bool enable_mag_filter_addr_rounding = mag_filter != VK_FILTER_NEAREST;
      /* From Broadwell PRM, SAMPLER_STATE:
       *
       *    "Mip Mode Filter must be set to MIPFILTER_NONE for Planar YUV
       *    surfaces."
       */
      const bool isl_format_is_planar_yuv = sampler->conversion &&
         isl_format_is_yuv(sampler->conversion->format->planes[0].isl_format) &&
         isl_format_is_planar(sampler->conversion->format->planes[0].isl_format);

      const uint32_t mip_filter_mode =
         isl_format_is_planar_yuv ?
         MIPFILTER_NONE : vk_to_intel_mipmap_mode[pCreateInfo->mipmapMode];

      struct GENX(SAMPLER_STATE) sampler_state = {
         .SamplerDisable = false,
         .TextureBorderColorMode = DX10OGL,

#if GFX_VER >= 11
         .CPSLODCompensationEnable = true,
#endif

#if GFX_VER >= 8
         .LODPreClampMode = CLAMP_MODE_OGL,
#else
         .LODPreClampEnable = CLAMP_ENABLE_OGL,
#endif

#if GFX_VER == 8
         .BaseMipLevel = 0.0,
#endif
         .MipModeFilter = mip_filter_mode,
         .MagModeFilter = vk_to_intel_tex_filter(mag_filter, pCreateInfo->anisotropyEnable),
         .MinModeFilter = vk_to_intel_tex_filter(min_filter, pCreateInfo->anisotropyEnable),
         .TextureLODBias = anv_clamp_f(pCreateInfo->mipLodBias, -16, 15.996),
         .AnisotropicAlgorithm =
            pCreateInfo->anisotropyEnable ? EWAApproximation : LEGACY,
         .MinLOD = anv_clamp_f(pCreateInfo->minLod, 0, 14),
         .MaxLOD = anv_clamp_f(pCreateInfo->maxLod, 0, 14),
         .ChromaKeyEnable = 0,
         .ChromaKeyIndex = 0,
         .ChromaKeyMode = 0,
         .ShadowFunction =
            vk_to_intel_shadow_compare_op[pCreateInfo->compareEnable ?
                                          pCreateInfo->compareOp : VK_COMPARE_OP_NEVER],
         .CubeSurfaceControlMode = OVERRIDE,

         .BorderColorPointer = border_color_offset,

#if GFX_VER >= 8
         .LODClampMagnificationMode = MIPNONE,
#endif

         .MaximumAnisotropy = vk_to_intel_max_anisotropy(pCreateInfo->maxAnisotropy),
         .RAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .RAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .VAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .VAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .UAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .UAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .TrilinearFilterQuality = 0,
         .NonnormalizedCoordinateEnable = pCreateInfo->unnormalizedCoordinates,
         .TCXAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeU],
         .TCYAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeV],
         .TCZAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeW],

#if GFX_VER >= 9
         .ReductionType = sampler_reduction_mode,
         .ReductionTypeEnable = enable_sampler_reduction,
#endif
      };

      GENX(SAMPLER_STATE_pack)(NULL, sampler->state[p], &sampler_state);

      if (sampler->bindless_state.map) {
         memcpy(sampler->bindless_state.map + p * 32,
                sampler->state[p], GENX(SAMPLER_STATE_length) * 4);
      }
   }

   *pSampler = anv_sampler_to_handle(sampler);

   return VK_SUCCESS;
}