/* Path: blob/21.2-virgl/src/panfrost/midgard/midgard_ra_pipeline.c */
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <[email protected]>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"

/* Creates pipeline registers. This is a prepass run before the main register
 * allocator but after scheduling, once bundles are created. It works by
 * iterating the scheduled IR, checking if a value is ever used after the end
 * of the current bundle. If it is not, it is promoted to a bundle-specific
 * pipeline register.
 *
 * Pipeline registers are only written from the first two stages of the
 * pipeline (vmul/sadd) lasting the duration of the bundle only. There are two
 * 128-bit pipeline registers available (r24/r25). The upshot is that no actual
 * register allocation is needed; we can _always_ promote a value to a pipeline
 * register, liveness permitting.
This greatly simplifies the logic of this37* passing, negating the need for a proper RA like work registers.38*/3940static bool41mir_pipeline_ins(42compiler_context *ctx,43midgard_block *block,44midgard_bundle *bundle, unsigned i,45unsigned pipeline_count)46{47midgard_instruction *ins = bundle->instructions[i];4849/* Our goal is to create a pipeline register. Pipeline registers are50* created at the start of the bundle and are destroyed at the end. So51* we conservatively require:52*53* 1. Each component read in the second stage is written in the first stage.54* 2. The index is not live after the bundle.55* 3. We're not a special index (writeout, conditionals, ..)56*57* Rationale: #1 ensures that there is no need to go before the58* creation of the bundle, so the pipeline register can exist. #2 is59* since the pipeline register will be destroyed at the end. This60* ensures that nothing will try to read/write the pipeline register61* once it is not live, and that there's no need to go earlier. 
*/6263unsigned node = ins->dest;64unsigned read_mask = 0;6566if (node >= SSA_FIXED_MINIMUM)67return false;6869if (node == ctx->blend_src1)70return false;7172/* Analyze the bundle for a per-byte read mask */7374for (unsigned j = 0; j < bundle->instruction_count; ++j) {75midgard_instruction *q = bundle->instructions[j];7677/* The fragment colour can't be pipelined (well, it is78* pipelined in r0, but this is a delicate dance with79* scheduling and RA, not for us to worry about) */8081if (q->compact_branch && q->writeout && mir_has_arg(q, node))82return false;8384if (q->unit < UNIT_VADD) continue;85read_mask |= mir_bytemask_of_read_components(q, node);86}8788/* Now check what's written in the beginning stage */89for (unsigned j = 0; j < bundle->instruction_count; ++j) {90midgard_instruction *q = bundle->instructions[j];91if (q->unit >= UNIT_VADD) break;92if (q->dest != node) continue;9394/* Remove the written mask from the read requirements */95read_mask &= ~mir_bytemask(q);96}9798/* Check for leftovers */99if (read_mask)100return false;101102/* We want to know if we live after this bundle, so check if103* we're live after the last instruction of the bundle */104105midgard_instruction *end = bundle->instructions[106bundle->instruction_count - 1];107108if (mir_is_live_after(ctx, block, end, ins->dest))109return false;110111/* We're only live in this bundle -- pipeline! 
*/112unsigned preg = SSA_FIXED_REGISTER(24 + pipeline_count);113114for (unsigned j = 0; j < bundle->instruction_count; ++j) {115midgard_instruction *q = bundle->instructions[j];116117if (q->unit >= UNIT_VADD)118mir_rewrite_index_src_single(q, node, preg);119else120mir_rewrite_index_dst_single(q, node, preg);121}122123return true;124}125126void127mir_create_pipeline_registers(compiler_context *ctx)128{129mir_invalidate_liveness(ctx);130131mir_foreach_block(ctx, _block) {132midgard_block *block = (midgard_block *) _block;133134mir_foreach_bundle_in_block(block, bundle) {135if (!mir_is_alu_bundle(bundle)) continue;136if (bundle->instruction_count < 2) continue;137138/* Only first 2 instructions could pipeline */139bool succ = mir_pipeline_ins(ctx, block, bundle, 0, 0);140mir_pipeline_ins(ctx, block, bundle, 1, succ);141}142}143}144145146