Path: blob/21.2-virgl/src/intel/vulkan/genX_state.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "common/intel_aux_map.h"
#include "common/intel_sample_positions.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "vk_util.h"

/**
 * Compute an \p n x \p m pixel hashing table usable as slice, subslice or
 * pixel pipe hashing table.  The resulting table is the cyclic repetition of
 * a fixed pattern with periodicity equal to \p period.
 *
 * If \p index is specified to be equal to \p period, a 2-way hashing table
 * will be generated such that indices 0 and 1 are returned for the following
 * fractions of entries respectively:
 *
 *   p_0 = ceil(period / 2) / period
 *   p_1 = floor(period / 2) / period
 *
 * If \p index is even and less than \p period, a 3-way hashing table will be
 * generated such that indices 0, 1 and 2 are returned for the following
 * fractions of entries:
 *
 *   p_0 = (ceil(period / 2) - 1) / period
 *   p_1 = floor(period / 2) / period
 *   p_2 = 1 / period
 *
 * The equations above apply if \p flip is equal to 0; if it is equal to 1,
 * p_0 and p_1 will be swapped in the result.  Note that in the context of
 * pixel pipe hashing this can always be 0 on Gfx12 platforms, since the
 * hardware transparently remaps logical indices found on the table to
 * physical pixel pipe indices from the highest to lowest EU count.
 */
UNUSED static void
calculate_pixel_hashing_table(unsigned n, unsigned m,
                              unsigned period, unsigned index, bool flip,
                              uint32_t *p)
{
   for (unsigned i = 0; i < n; i++) {
      for (unsigned j = 0; j < m; j++) {
         const unsigned k = (i + j) % period;
         p[j + m * i] = (k == index ? 2 : (k & 1) ^ flip);
      }
   }
}

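/* For example, a 4x4 call calculate_pixel_hashing_table(4, 4, 3, 3, false, p)
 * (smaller than the 16x16 tables used below, purely for illustration)
 * produces the 2-way pattern
 *
 *    0 1 0 0
 *    1 0 0 1
 *    0 0 1 0
 *    0 1 0 0
 *
 * i.e. with period 3 and index == period, 2/3 of the entries are 0 and 1/3
 * are 1, matching p_0 and p_1 above.
 */
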
static void
genX(emit_slice_hashing_state)(struct anv_device *device,
                               struct anv_batch *batch)
{
   device->slice_hash = (struct anv_state) { 0 };

#if GFX_VER == 11
   assert(device->info.ppipe_subslices[2] == 0);

   if (device->info.ppipe_subslices[0] == device->info.ppipe_subslices[1])
      return;

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   device->slice_hash =
      anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);

   const bool flip = device->info.ppipe_subslices[0] <
                     device->info.ppipe_subslices[1];
   struct GENX(SLICE_HASH_TABLE) table;
   calculate_pixel_hashing_table(16, 16, 3, 3, flip, table.Entry[0]);

   GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, &table);

   anv_batch_emit(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = device->slice_hash.offset;
   }

   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
#elif GFX_VERx10 == 120
   /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
    * present with n active dual subslices.
    */
   unsigned ppipes_of[3] = {};

   for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
      for (unsigned p = 0; p < ARRAY_SIZE(device->info.ppipe_subslices); p++)
         ppipes_of[n] += (device->info.ppipe_subslices[p] == n);
   }

   /* Gfx12 has three pixel pipes. */
   assert(ppipes_of[0] + ppipes_of[1] + ppipes_of[2] == 3);

   if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
      /* All three pixel pipes have the maximum number of active dual
       * subslices, or there is only one active pixel pipe: Nothing to do.
       */
      return;
   }

   anv_batch_emit(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
      p.SliceHashControl[0] = TABLE_0;

      if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);

      if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
         calculate_pixel_hashing_table(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
      else
         unreachable("Illegal fusing.");
   }

   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), p) {
      p.SubsliceHashingTableEnable = true;
      p.SubsliceHashingTableEnableMask = true;
   }
#endif
}

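/* As an illustration of the Gfx12 fusing handling above: a hypothetical part
 * fused down to ppipe_subslices = {2, 2, 1} yields ppipes_of = {0, 1, 2}, so
 * only the three-way table is programmed, via
 * calculate_pixel_hashing_table(8, 16, 5, 4, 0, ...), which spreads indices
 * 0, 1 and 2 over 2/5, 2/5 and 1/5 of the entries respectively.
 */
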
static VkResult
init_render_queue_state(struct anv_queue *queue)
{
   struct anv_device *device = queue->device;
   struct anv_batch batch;

   uint32_t cmds[64];
   batch.start = batch.next = cmds;
   batch.end = (void *) cmds + sizeof(cmds);

   anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
#if GFX_VER >= 9
      ps.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
      ps.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
#endif
      ps.PipelineSelection = _3D;
   }

#if GFX_VER == 9
   anv_batch_write_reg(&batch, GENX(CACHE_MODE_1), cm1) {
      cm1.FloatBlendOptimizationEnable = true;
      cm1.FloatBlendOptimizationEnableMask = true;
      cm1.MSCRAWHazardAvoidanceBit = true;
      cm1.MSCRAWHazardAvoidanceBitMask = true;
      cm1.PartialResolveDisableInVC = true;
      cm1.PartialResolveDisableInVCMask = true;
   }
#endif

   anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);

   anv_batch_emit(&batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleYMin = 0;
      rect.ClippedDrawingRectangleXMin = 0;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.DrawingRectangleOriginY = 0;
      rect.DrawingRectangleOriginX = 0;
   }

#if GFX_VER >= 8
   anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);

   genX(emit_sample_pattern)(&batch, 0, NULL);

   /* The BDW+ docs describe how to use the 3DSTATE_WM_HZ_OP instruction in
    * the section titled, "Optimized Depth Buffer Clear and/or Stencil Buffer
    * Clear."  It mentions that the packet overrides GPU state for the clear
    * operation and needs to be reset to 0s to clear the overrides.  Depending
    * on the kernel, we may not get a context with the state for this packet
    * zeroed.  Do it ourselves just in case.  We've observed this to prevent a
    * number of GPU hangs on ICL.
    */
   anv_batch_emit(&batch, GENX(3DSTATE_WM_HZ_OP), hzp);
#endif

#if GFX_VER == 11
   /* The default behavior of bit 5 "Headerless Message for Pre-emptable
    * Contexts" in SAMPLER MODE register is set to 0, which means
    * headerless sampler messages are not allowed for pre-emptable
    * contexts.  Set bit 5 to 1 to allow them.
    */
   anv_batch_write_reg(&batch, GENX(SAMPLER_MODE), sm) {
      sm.HeaderlessMessageforPreemptableContexts = true;
      sm.HeaderlessMessageforPreemptableContextsMask = true;
   }

   /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in
    * HALF_SLICE_CHICKEN7 register.
    */
   anv_batch_write_reg(&batch, GENX(HALF_SLICE_CHICKEN7), hsc7) {
      hsc7.EnabledTexelOffsetPrecisionFix = true;
      hsc7.EnabledTexelOffsetPrecisionFixMask = true;
   }

   anv_batch_write_reg(&batch, GENX(TCCNTLREG), tcc) {
      tcc.L3DataPartialWriteMergingEnable = true;
      tcc.ColorZPartialWriteMergingEnable = true;
      tcc.URBPartialWriteMergingEnable = true;
      tcc.TCDisable = true;
   }
#endif
   genX(emit_slice_hashing_state)(device, &batch);

#if GFX_VER >= 11
   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (device->info.disable_ccs_repack) {
      anv_batch_write_reg(&batch, GENX(CACHE_MODE_0), cm0) {
         cm0.DisableRepackingforCompression = true;
         cm0.DisableRepackingforCompressionMask = true;
      }
   }

   /* An unknown issue is causing VS push constants to become corrupted
    * during object-level preemption.  For now, restrict to command buffer
    * level preemption to avoid rendering corruption.
    */
   anv_batch_write_reg(&batch, GENX(CS_CHICKEN1), cc1) {
      cc1.ReplayMode = MidcmdbufferPreemption;
      cc1.ReplayModeMask = true;
   }
#endif

#if GFX_VER == 12
   if (device->info.has_aux_map) {
      uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
      assert(aux_base_addr % (32 * 1024) == 0);
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num);
         lri.DataDWord = aux_base_addr & 0xffffffff;
      }
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num) + 4;
         lri.DataDWord = aux_base_addr >> 32;
      }
   }
#endif

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * This is only safe on kernels with context isolation support.
    */
   if (GFX_VER >= 8 && device->physical->has_context_isolation) {
#if GFX_VER >= 9
      anv_batch_write_reg(&batch, GENX(CS_DEBUG_MODE2), csdm2) {
         csdm2.CONSTANT_BUFFERAddressOffsetDisable = true;
         csdm2.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#elif GFX_VER == 8
      anv_batch_write_reg(&batch, GENX(INSTPM), instpm) {
         instpm.CONSTANT_BUFFERAddressOffsetDisable = true;
         instpm.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#endif
   }

#if GFX_VER >= 11
   /* Starting with GFX version 11, SLM is no longer part of the L3$ config
    * so it never changes throughout the lifetime of the VkDevice.
    */
   const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
   genX(emit_l3_config)(&batch, device, cfg);
   device->l3_config = cfg;
#endif

   anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);

   assert(batch.next <= batch.end);

   return anv_queue_submit_simple_batch(queue, &batch);
}

void
genX(init_physical_device_state)(ASSERTED struct anv_physical_device *device)
{
   assert(device->info.verx10 == GFX_VERx10);
}

VkResult
genX(init_device_state)(struct anv_device *device)
{
   VkResult res;

   for (uint32_t i = 0; i < device->queue_count; i++) {
      struct anv_queue *queue = &device->queues[i];
      switch (queue->family->engine_class) {
      case I915_ENGINE_CLASS_RENDER:
         res = init_render_queue_state(queue);
         break;
      default:
         res = vk_error(VK_ERROR_INITIALIZATION_FAILED);
         break;
      }
      if (res != VK_SUCCESS)
         return res;
   }

   return res;
}

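/* Program the L3 cache partitioning described by \p cfg.  On Gfx8+ this is a
 * single allocation register write (L3ALLOC on Gfx12+, L3CNTLREG otherwise);
 * on earlier gens the configuration is spread across L3SQCREG1, L3CNTLREG2
 * and L3CNTLREG3, plus a Haswell-specific adjustment of the L3 atomics
 * chicken bits.
 */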
void
genX(emit_l3_config)(struct anv_batch *batch,
                     const struct anv_device *device,
                     const struct intel_l3_config *cfg)
{
   UNUSED const struct intel_device_info *devinfo = &device->info;

#if GFX_VER >= 8

#if GFX_VER >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   anv_batch_write_reg(batch, L3_ALLOCATION_REG, l3cr) {
      if (cfg == NULL) {
#if GFX_VER >= 12
         l3cr.L3FullWayAllocationEnable = true;
#else
         unreachable("Invalid L3$ config");
#endif
      } else {
#if GFX_VER < 11
         l3cr.SLMEnable = cfg->n[INTEL_L3P_SLM];
#endif
#if GFX_VER == 11
         /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be
          * set in L3CNTLREG register.  The default setting of the bit is not
          * the desirable behavior.
          */
         l3cr.ErrorDetectionBehaviorControl = true;
         l3cr.UseFullWays = true;
#endif /* GFX_VER == 11 */
         assert(cfg->n[INTEL_L3P_IS] == 0);
         assert(cfg->n[INTEL_L3P_C] == 0);
         assert(cfg->n[INTEL_L3P_T] == 0);
         l3cr.URBAllocation = cfg->n[INTEL_L3P_URB];
         l3cr.ROAllocation = cfg->n[INTEL_L3P_RO];
         l3cr.DCAllocation = cfg->n[INTEL_L3P_DC];
         l3cr.AllAllocation = cfg->n[INTEL_L3P_ALL];
      }
   }

#else /* GFX_VER < 8 */

   const bool has_dc = cfg->n[INTEL_L3P_DC] || cfg->n[INTEL_L3P_ALL];
   const bool has_is = cfg->n[INTEL_L3P_IS] || cfg->n[INTEL_L3P_RO] ||
                       cfg->n[INTEL_L3P_ALL];
   const bool has_c = cfg->n[INTEL_L3P_C] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];
   const bool has_t = cfg->n[INTEL_L3P_T] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];

   assert(!cfg->n[INTEL_L3P_ALL]);

   /* When enabled SLM only uses a portion of the L3 on half of the banks,
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const bool urb_low_bw = cfg->n[INTEL_L3P_SLM] && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[INTEL_L3P_URB] == cfg->n[INTEL_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[INTEL_L3P_URB] >= n0_urb);

   anv_batch_write_reg(batch, GENX(L3SQCREG1), l3sqc) {
      l3sqc.ConvertDC_UC = !has_dc;
      l3sqc.ConvertIS_UC = !has_is;
      l3sqc.ConvertC_UC = !has_c;
      l3sqc.ConvertT_UC = !has_t;
#if GFX_VERx10 == 75
      l3sqc.L3SQGeneralPriorityCreditInitialization = SQGPCI_DEFAULT;
#else
      l3sqc.L3SQGeneralPriorityCreditInitialization =
         devinfo->is_baytrail ? BYT_SQGPCI_DEFAULT : SQGPCI_DEFAULT;
#endif
      l3sqc.L3SQHighPriorityCreditInitialization = SQHPCI_DEFAULT;
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG2), l3cr2) {
      l3cr2.SLMEnable = cfg->n[INTEL_L3P_SLM];
      l3cr2.URBLowBandwidth = urb_low_bw;
      l3cr2.URBAllocation = cfg->n[INTEL_L3P_URB] - n0_urb;
#if GFX_VERx10 != 75
      l3cr2.ALLAllocation = cfg->n[INTEL_L3P_ALL];
#endif
      l3cr2.ROAllocation = cfg->n[INTEL_L3P_RO];
      l3cr2.DCAllocation = cfg->n[INTEL_L3P_DC];
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG3), l3cr3) {
      l3cr3.ISAllocation = cfg->n[INTEL_L3P_IS];
      l3cr3.ISLowBandwidth = 0;
      l3cr3.CAllocation = cfg->n[INTEL_L3P_C];
      l3cr3.CLowBandwidth = 0;
      l3cr3.TAllocation = cfg->n[INTEL_L3P_T];
      l3cr3.TLowBandwidth = 0;
   }

#if GFX_VERx10 == 75
   if (device->physical->cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      anv_batch_write_reg(batch, GENX(SCRATCH1), s1) {
         s1.L3AtomicDisable = !has_dc;
      }
      anv_batch_write_reg(batch, GENX(CHICKEN3), c3) {
         c3.L3AtomicDisableMask = true;
         c3.L3AtomicDisable = !has_dc;
      }
   }
#endif /* GFX_VERx10 == 75 */

#endif /* GFX_VER < 8 */
}

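/* Emit 3DSTATE_MULTISAMPLE for the given sample count.  NumberofMultisamples
 * takes log2 of the (power-of-two) sample count, hence the ffs(samples) - 1
 * below; on pre-Gfx8 hardware the per-sample positions are also programmed
 * here, from either the caller-provided locations or the default Intel
 * sample positions.
 */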
void
genX(emit_multisample)(struct anv_batch *batch, uint32_t samples,
                       const VkSampleLocationEXT *locations)
{
   anv_batch_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
      ms.NumberofMultisamples = __builtin_ffs(samples) - 1;

      ms.PixelLocation = CENTER;
#if GFX_VER >= 8
      /* The PRM says that this bit is valid only for DX9:
       *
       *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
       *    should not have any effect by setting or not setting this bit.
       */
      ms.PixelPositionOffsetEnable = false;
#else

      if (locations) {
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X_ARRAY(ms.Sample, locations);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X_ARRAY(ms.Sample, locations);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X_ARRAY(ms.Sample, locations);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X_ARRAY(ms.Sample, locations);
            break;
         default:
            break;
         }
      } else {
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X(ms.Sample);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X(ms.Sample);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X(ms.Sample);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X(ms.Sample);
            break;
         default:
            break;
         }
      }
#endif
   }
}

#if GFX_VER >= 8
void
genX(emit_sample_pattern)(struct anv_batch *batch, uint32_t samples,
                          const VkSampleLocationEXT *locations)
{
   /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
    * VkPhysicalDeviceFeatures::standardSampleLocations.
    */
   anv_batch_emit(batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
      if (locations) {
         /* The Skylake PRM Vol. 2a "3DSTATE_SAMPLE_PATTERN" says:
          *
          *    "When programming the sample offsets (for NUMSAMPLES_4 or _8
          *    and MSRASTMODE_xxx_PATTERN), the order of the samples 0 to 3
          *    (or 7 for 8X, or 15 for 16X) must have monotonically increasing
          *    distance from the pixel center. This is required to get the
          *    correct centroid computation in the device."
          *
          * However, the Vulkan spec seems to require that the samples occur
          * in the order provided through the API.  The standard sample
          * patterns have the above property that they have monotonically
          * increasing distances from the center but client-provided ones do
          * not.  As long as this only affects centroid calculations as the
          * docs say, we should be ok because OpenGL and Vulkan only require
          * that the centroid be some lit sample and that it's the same for
          * all samples in a pixel; they have no requirement that it be the
          * one closest to center.
          */
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X_ARRAY(sp._1xSample, locations);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X_ARRAY(sp._2xSample, locations);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X_ARRAY(sp._4xSample, locations);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X_ARRAY(sp._8xSample, locations);
            break;
#if GFX_VER >= 9
         case 16:
            INTEL_SAMPLE_POS_16X_ARRAY(sp._16xSample, locations);
            break;
#endif
         default:
            break;
         }
      } else {
         INTEL_SAMPLE_POS_1X(sp._1xSample);
         INTEL_SAMPLE_POS_2X(sp._2xSample);
         INTEL_SAMPLE_POS_4X(sp._4xSample);
         INTEL_SAMPLE_POS_8X(sp._8xSample);
#if GFX_VER >= 9
         INTEL_SAMPLE_POS_16X(sp._16xSample);
#endif
      }
   }
}
#endif

#if GFX_VER >= 11
void
genX(emit_shading_rate)(struct anv_batch *batch,
                        const struct anv_graphics_pipeline *pipeline,
                        struct anv_state cps_states,
                        struct anv_dynamic_state *dynamic_state)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
   const bool cps_enable = wm_prog_data && wm_prog_data->per_coarse_pixel_dispatch;

#if GFX_VER == 11
   anv_batch_emit(batch, GENX(3DSTATE_CPS), cps) {
      cps.CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE;
      if (cps_enable) {
         cps.MinCPSizeX = dynamic_state->fragment_shading_rate.width;
         cps.MinCPSizeY = dynamic_state->fragment_shading_rate.height;
      }
   }
#elif GFX_VER == 12
   for (uint32_t i = 0; i < dynamic_state->viewport.count; i++) {
      uint32_t *cps_state_dwords =
         cps_states.map + GENX(CPS_STATE_length) * 4 * i;
      struct GENX(CPS_STATE) cps_state = {
         .CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE,
      };

      if (cps_enable) {
         cps_state.MinCPSizeX = dynamic_state->fragment_shading_rate.width;
         cps_state.MinCPSizeY = dynamic_state->fragment_shading_rate.height;
      }

      GENX(CPS_STATE_pack)(NULL, cps_state_dwords, &cps_state);
   }

   anv_batch_emit(batch, GENX(3DSTATE_CPS_POINTERS), cps) {
      cps.CoarsePixelShadingStateArrayPointer = cps_states.offset;
   }
#endif
}
#endif /* GFX_VER >= 11 */

static uint32_t
vk_to_intel_tex_filter(VkFilter filter, bool anisotropyEnable)
{
   switch (filter) {
   default:
      assert(!"Invalid filter");
   case VK_FILTER_NEAREST:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_NEAREST;
   case VK_FILTER_LINEAR:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_LINEAR;
   }
}

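/* Map VkSamplerCreateInfo::maxAnisotropy to the SAMPLER_STATE maximum
 * anisotropy field, which encodes ratios from 2:1 to 16:1 in steps of two.
 * For example, 1.0 and 2.0 both map to 0 (2:1), 4.0 maps to 1 (4:1) and
 * 16.0 maps to 7 (16:1).
 */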
static uint32_t
vk_to_intel_max_anisotropy(float ratio)
{
   return (anv_clamp_f(ratio, 2, 16) - 2) / 2;
}

static const uint32_t vk_to_intel_mipmap_mode[] = {
   [VK_SAMPLER_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
   [VK_SAMPLER_MIPMAP_MODE_LINEAR]  = MIPFILTER_LINEAR
};

static const uint32_t vk_to_intel_tex_address[] = {
   [VK_SAMPLER_ADDRESS_MODE_REPEAT]               = TCM_WRAP,
   [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT]      = TCM_MIRROR,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE]        = TCM_CLAMP,
   [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER]      = TCM_CLAMP_BORDER,
};

/* Vulkan specifies the result of shadow comparisons as:
 *     1 if   ref <op> texel,
 *     0 otherwise.
 *
 * The hardware does:
 *     0 if texel <op> ref,
 *     1 otherwise.
 *
 * So, these look a bit strange because there's both a negation
 * and swapping of the arguments involved.
 */
static const uint32_t vk_to_intel_shadow_compare_op[] = {
   [VK_COMPARE_OP_NEVER]            = PREFILTEROP_ALWAYS,
   [VK_COMPARE_OP_LESS]             = PREFILTEROP_LEQUAL,
   [VK_COMPARE_OP_EQUAL]            = PREFILTEROP_NOTEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL]    = PREFILTEROP_LESS,
   [VK_COMPARE_OP_GREATER]          = PREFILTEROP_GEQUAL,
   [VK_COMPARE_OP_NOT_EQUAL]        = PREFILTEROP_EQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROP_GREATER,
   [VK_COMPARE_OP_ALWAYS]           = PREFILTEROP_NEVER,
};

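/* Worked example of the mapping above: Vulkan wants VK_COMPARE_OP_LESS to
 * return 1 exactly when ref < texel.  The hardware returns 0 when
 * "texel <op> ref" holds and 1 otherwise, so we need the hardware op that
 * fails exactly when ref < texel; "texel <= ref" is that op, hence
 * PREFILTEROP_LEQUAL.
 */
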
#if GFX_VER >= 9
static const uint32_t vk_to_intel_sampler_reduction_mode[] = {
   [VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT] = STD_FILTER,
   [VK_SAMPLER_REDUCTION_MODE_MIN_EXT]              = MINIMUM,
   [VK_SAMPLER_REDUCTION_MODE_MAX_EXT]              = MAXIMUM,
};
#endif

VkResult genX(CreateSampler)(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSampler*                                  pSampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
                              VK_OBJECT_TYPE_SAMPLER);
   if (!sampler)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   sampler->n_planes = 1;

   uint32_t border_color_stride = GFX_VERx10 == 75 ? 512 : 64;
   uint32_t border_color_offset;
   ASSERTED bool has_custom_color = false;
   if (pCreateInfo->borderColor <= VK_BORDER_COLOR_INT_OPAQUE_WHITE) {
      border_color_offset = device->border_colors.offset +
                            pCreateInfo->borderColor *
                            border_color_stride;
   } else {
      assert(GFX_VER >= 8);
      sampler->custom_border_color =
         anv_state_reserved_pool_alloc(&device->custom_border_colors);
      border_color_offset = sampler->custom_border_color.offset;
   }

#if GFX_VER >= 9
   unsigned sampler_reduction_mode = STD_FILTER;
   bool enable_sampler_reduction = false;
#endif

   vk_foreach_struct(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
         VkSamplerYcbcrConversionInfo *pSamplerConversion =
            (VkSamplerYcbcrConversionInfo *) ext;
         ANV_FROM_HANDLE(anv_ycbcr_conversion, conversion,
                         pSamplerConversion->conversion);

         /* Ignore conversion for non-YUV formats. This fulfills a requirement
          * for clients that want to utilize same code path for images with
          * external formats (VK_FORMAT_UNDEFINED) and "regular" RGBA images
          * where format is known.
          */
         if (conversion == NULL || !conversion->format->can_ycbcr)
            break;

         sampler->n_planes = conversion->format->n_planes;
         sampler->conversion = conversion;
         break;
      }
#if GFX_VER >= 9
      case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO: {
         VkSamplerReductionModeCreateInfo *sampler_reduction =
            (VkSamplerReductionModeCreateInfo *) ext;
         sampler_reduction_mode =
            vk_to_intel_sampler_reduction_mode[sampler_reduction->reductionMode];
         enable_sampler_reduction = true;
         break;
      }
#endif
      case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: {
         VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
            (VkSamplerCustomBorderColorCreateInfoEXT *) ext;
         if (sampler->custom_border_color.map == NULL)
            break;
         struct gfx8_border_color *cbc = sampler->custom_border_color.map;
         if (custom_border_color->format == VK_FORMAT_B4G4R4A4_UNORM_PACK16) {
            /* B4G4R4A4_UNORM_PACK16 is treated as R4G4B4A4_UNORM_PACK16 with
             * a swizzle, but this does not carry over to the sampler for
             * border colors, so we need to do the swizzle ourselves here.
             */
            cbc->uint32[0] = custom_border_color->customBorderColor.uint32[2];
            cbc->uint32[1] = custom_border_color->customBorderColor.uint32[1];
            cbc->uint32[2] = custom_border_color->customBorderColor.uint32[0];
            cbc->uint32[3] = custom_border_color->customBorderColor.uint32[3];
         } else {
            /* Both structs share the same layout, so just copy them over. */
            memcpy(cbc, &custom_border_color->customBorderColor,
                   sizeof(VkClearColorValue));
         }
         has_custom_color = true;
         break;
      }
      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

   assert((sampler->custom_border_color.map == NULL) || has_custom_color);

   if (device->physical->has_bindless_samplers) {
      /* If we have bindless, allocate enough samplers.  We allocate 32 bytes
       * for each sampler instead of 16 bytes because we want all bindless
       * samplers to be 32-byte aligned so we don't have to use indirect
       * sampler messages on them.
       */
      sampler->bindless_state =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              sampler->n_planes * 32, 32);
   }

   for (unsigned p = 0; p < sampler->n_planes; p++) {
      const bool plane_has_chroma =
         sampler->conversion && sampler->conversion->format->planes[p].has_chroma;
      const VkFilter min_filter =
         plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->minFilter;
      const VkFilter mag_filter =
         plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->magFilter;
      const bool enable_min_filter_addr_rounding = min_filter != VK_FILTER_NEAREST;
      const bool enable_mag_filter_addr_rounding = mag_filter != VK_FILTER_NEAREST;
      /* From Broadwell PRM, SAMPLER_STATE:
       *   "Mip Mode Filter must be set to MIPFILTER_NONE for Planar YUV surfaces."
       */
      const bool isl_format_is_planar_yuv = sampler->conversion &&
         isl_format_is_yuv(sampler->conversion->format->planes[0].isl_format) &&
         isl_format_is_planar(sampler->conversion->format->planes[0].isl_format);

      const uint32_t mip_filter_mode =
         isl_format_is_planar_yuv ?
         MIPFILTER_NONE : vk_to_intel_mipmap_mode[pCreateInfo->mipmapMode];

      struct GENX(SAMPLER_STATE) sampler_state = {
         .SamplerDisable = false,
         .TextureBorderColorMode = DX10OGL,

#if GFX_VER >= 11
         .CPSLODCompensationEnable = true,
#endif

#if GFX_VER >= 8
         .LODPreClampMode = CLAMP_MODE_OGL,
#else
         .LODPreClampEnable = CLAMP_ENABLE_OGL,
#endif

#if GFX_VER == 8
         .BaseMipLevel = 0.0,
#endif
         .MipModeFilter = mip_filter_mode,
         .MagModeFilter = vk_to_intel_tex_filter(mag_filter, pCreateInfo->anisotropyEnable),
         .MinModeFilter = vk_to_intel_tex_filter(min_filter, pCreateInfo->anisotropyEnable),
         .TextureLODBias = anv_clamp_f(pCreateInfo->mipLodBias, -16, 15.996),
         .AnisotropicAlgorithm =
            pCreateInfo->anisotropyEnable ? EWAApproximation : LEGACY,
         .MinLOD = anv_clamp_f(pCreateInfo->minLod, 0, 14),
         .MaxLOD = anv_clamp_f(pCreateInfo->maxLod, 0, 14),
         .ChromaKeyEnable = 0,
         .ChromaKeyIndex = 0,
         .ChromaKeyMode = 0,
         .ShadowFunction =
            vk_to_intel_shadow_compare_op[pCreateInfo->compareEnable ?
                                          pCreateInfo->compareOp : VK_COMPARE_OP_NEVER],
         .CubeSurfaceControlMode = OVERRIDE,

         .BorderColorPointer = border_color_offset,

#if GFX_VER >= 8
         .LODClampMagnificationMode = MIPNONE,
#endif

         .MaximumAnisotropy = vk_to_intel_max_anisotropy(pCreateInfo->maxAnisotropy),
         .RAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .RAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .VAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .VAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .UAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .UAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .TrilinearFilterQuality = 0,
         .NonnormalizedCoordinateEnable = pCreateInfo->unnormalizedCoordinates,
         .TCXAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeU],
         .TCYAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeV],
         .TCZAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeW],

#if GFX_VER >= 9
         .ReductionType = sampler_reduction_mode,
         .ReductionTypeEnable = enable_sampler_reduction,
#endif
      };

      GENX(SAMPLER_STATE_pack)(NULL, sampler->state[p], &sampler_state);

      if (sampler->bindless_state.map) {
         memcpy(sampler->bindless_state.map + p * 32,
                sampler->state[p], GENX(SAMPLER_STATE_length) * 4);
      }
   }

   *pSampler = anv_sampler_to_handle(sampler);

   return VK_SUCCESS;
}