Path: blob/21.2-virgl/src/panfrost/util/pan_lower_framebuffer.c
/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <[email protected]>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this
 * module is responsible for identifying type 3 formats (hardware dependent)
 * and inserting appropriate ALU code to perform the conversion from the
 * packed type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *   - For 32-bit float formats, 32-bit floats.
 *   - For other floats, 16-bit floats.
 *   - For 32-bit ints, 32-bit ints.
 *   - For 8-bit ints, 8-bit ints.
 *   - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using
 * the smallest precision necessary to store the pixel losslessly.
 */
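
/* Concretely (a few examples worked against the rules above): RGBA32F
 * unpacks to 32-bit float, RGBA16F and RGBA8_UNORM to 16-bit float,
 * RGBA32UI to 32-bit uint, RGBA8UI to 8-bit uint, and RGBA16I to 16-bit
 * int. */
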
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of
 * the pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool large_norm = (desc->channel[c].size > 8);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large_norm ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                       large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                       large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Pure integers can be loaded via EXT_framebuffer_fetch and should be
         * handled as a raw load with a size conversion (it's cheap). Likewise,
         * since float framebuffers are internally implemented as raw (i.e.
         * integer) framebuffers with blend shaders to go back and forth, they
         * should be s/w as well */

        if (util_format_is_pure_integer(desc->format) || util_format_is_float(desc->format))
                return PAN_FORMAT_SOFTWARE;

        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}
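
/* For example: a load from an RGB565 target classifies as PAN_FORMAT_PACK on
 * hardware with MIDGARD_NO_TYPED_BLEND_LOADS alone, but as
 * PAN_FORMAT_SOFTWARE once NO_BLEND_PACKS is also set; pure integer and
 * float targets always classify as PAN_FORMAT_SOFTWARE for loads. */
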
/* Software packs/unpacks, by format class. Packs take in the pixel value
 * typed as `pan_unpacked_type_for_format` of the format and return an
 * i32vec4 suitable for storing (with components replicated to fill). Unpacks
 * do the reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so we just need to
 * replicate to fill */

static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}
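
/* Worked example of the replication contract: packing a single-channel R32F
 * pixel holding 0.5f yields the i32vec4 (0x3f000000, 0x3f000000, 0x3f000000,
 * 0x3f000000), since channel 0 is repeated to fill all four slots. */
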
/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % num_components),
                        nir_channel(b, v, (c + 1) % num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}

/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % num_components);

        return nir_vec(b, q, 4);
}

static nir_ssa_def *
pan_extend(nir_builder *b, nir_ssa_def *v, unsigned N)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);
        assert(N <= 4);

        for (unsigned j = 0; j < v->num_components; ++j)
                q[j] = nir_channel(b, v, j);

        for (unsigned j = v->num_components; j < N; ++j)
                q[j] = nir_imm_intN_t(b, 0, v->bit_size);

        return nir_vec(b, q, N);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v, num_components)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* UNORM 8 is unpacked to f16 vec4. We could directly use the
 * un/pack_unorm_4x8 ops provided we replicate appropriately, but for packing
 * we'd rather stay in 8/16-bit whereas the NIR op forces 32-bit, so we do it
 * manually */

static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v, v->num_components)), nir_imm_float16(b, 255.0))))));
}

static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2fmp(b, unpacked);
}

/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, spaced out so each component is byte-aligned, with the
 * value in the top nibble. So pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */

static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v, v->num_components)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_i2i16(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}

static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}
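
/* Worked example for the UNORM 4 path (ignoring f16 rounding detail): v =
 * (1.0, 0.6, 0.2, 0.0) scales to (15, 9, 3, 0), shifts into the high
 * nibbles as (0xf0, 0x90, 0x30, 0x00), and packs to the 32-bit word
 * 0x003090f0. */
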
/* UNORM RGB5_A1 and RGB565 are similar */

static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

/* RGB10_A2 is packed in the tilebuffer with the bottom three bytes holding
 * the top 8 bits of R, G, and B, and the top byte holding the low 2 bits of
 * each channel (RGBA) packed together. As imirkin pointed out, this means
 * free conversion to RGBX8 */

static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b, nir_fmul(b, nir_fsat(b, v), scale))));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_i2i16(b, bytes);

        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                        nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                        nir_i2i16(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans[4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2fmp(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}

/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_ssa_def *
pan_pack_uint_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *shift = nir_ishl(b, nir_u2u32(b, v),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *p = nir_ior(b,
                nir_ior(b, nir_channel(b, shift, 0), nir_channel(b, shift, 1)),
                nir_ior(b, nir_channel(b, shift, 2), nir_channel(b, shift, 3)));

        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_uint_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *chan = nir_channel(b, packed, 0);

        nir_ssa_def *shift = nir_ushr(b, pan_replicate_4(b, chan),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *mask = nir_iand(b, shift,
                        nir_imm_ivec4(b, 0x3ff, 0x3ff, 0x3ff, 0x3));

        return nir_i2i16(b, mask);
}
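
/* Worked example of the two layouts, for opaque red v = (1.0, 0, 0, 1.0):
 * the UNORM pack scales to s = (1023, 0, 0, 3), so the bottom bytes hold
 * (s >> 2) = (0xff, 0, 0) and the top byte holds the low 2-bit pairs,
 * giving 0xc30000ff; the pure int pack of (1023, 0, 0, 3) is simply
 * 1023 | (3 << 30) = 0xc00003ff, matching the API-level bit layout. */
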
/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2fmp(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2fmp(b,
                nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

static nir_ssa_def *
pan_srgb_to_linear(nir_builder *b, nir_ssa_def *srgb)
{
        nir_ssa_def *rgb = nir_channels(b, srgb, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *linear = nir_f2fmp(b,
                nir_format_srgb_to_linear(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, linear, 0),
                nir_channel(b, linear, 1),
                nir_channel(b, linear, 2),
                nir_channel(b, srgb, 3),
        };

        return nir_vec(b, comp, 4);
}
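
/* Only the RGB channels go through the transfer function; alpha passes
 * through untouched. For instance, linear 0.5 encodes to roughly 0.735 in
 * sRGB (1.055 * 0.5^(1/2.4) - 0.055). */
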
/* Generic dispatches for un/pack regardless of format */

static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return true;
        default:
                return false;
        }
}

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked, desc->nr_channels) :
                                pan_pack_pure_16(b, unpacked, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_store_raw_output_pan(b, packed);
}

static nir_ssa_def *
pan_sample_id(nir_builder *b, int sample)
{
        return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned base, int sample, unsigned quirks)
{
        nir_ssa_def *packed =
                nir_load_raw_output_pan(b, 4, 32, pan_sample_id(b, sample),
                                .base = base);

        /* Convert the raw value */
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_srgb_to_linear(b, unpacked);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         *      If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         *      conversion is applied. If the values written by the fragment
         *      shader do not match the format(s) of the corresponding color
         *      buffer(s), the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type;
        if (desc->channel[0].pure_integer) {
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
                        src_type = nir_type_int;
                else
                        src_type = nir_type_uint;
        } else {
                src_type = nir_type_float;
        }

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
        unpacked = pan_extend(b, unpacked, nir_dest_num_components(intr->dest));

        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
}
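
/* The net effect: a framebuffer fetch of, say, an RGB565 target becomes a
 * load_raw_output_pan feeding pan_unpack_unorm_565 (plus an sRGB decode and
 * a size conversion where needed), and a blend shader store becomes the
 * matching pack feeding store_raw_output_pan. */
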
bool
pan_lower_framebuffer(nir_shader *shader, const enum pipe_format *rt_fmts,
                bool is_blend, unsigned quirks)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && is_blend)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                if (var->data.location < FRAG_RESULT_DATA0)
                                        continue;

                                unsigned base = var->data.driver_location;
                                unsigned rt = var->data.location - FRAG_RESULT_DATA0;

                                if (rt_fmts[rt] == PIPE_FORMAT_NONE)
                                        continue;

                                const struct util_format_description *desc =
                                        util_format_description(rt_fmts[rt]);

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                /* Don't lower */
                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                /* EXT_shader_framebuffer_fetch requires
                                 * per-sample loads. MSAA blend shaders are
                                 * not yet handled, so for now always load
                                 * sample 0. */
                                int sample = is_blend ? 0 : -1;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, base, sample, quirks);
                                }

                                nir_instr_remove(instr);

                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }

        return progress;
}
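
/* Usage sketch (hypothetical caller; everything here other than
 * pan_lower_framebuffer itself is illustrative):
 *
 *      enum pipe_format rts[8] = { PIPE_FORMAT_B5G6R5_UNORM };
 *      bool progress = pan_lower_framebuffer(nir, rts, is_blend, quirks);
 *      // on progress, the caller re-runs its usual NIR optimization loop
 */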