Path: blob/21.2-virgl/src/amd/compiler/aco_scheduler.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/amdgfxregs.h"

#include <algorithm>
#include <unordered_set>
#include <vector>

#define SMEM_WINDOW_SIZE (350 - ctx.num_waves * 35)
#define VMEM_WINDOW_SIZE (1024 - ctx.num_waves * 64)
#define POS_EXP_WINDOW_SIZE 512
#define SMEM_MAX_MOVES (64 - ctx.num_waves * 4)
#define VMEM_MAX_MOVES (256 - ctx.num_waves * 16)
/* creating clauses decreases def-use distances, so make it less aggressive the lower num_waves is */
#define VMEM_CLAUSE_MAX_GRAB_DIST (ctx.num_waves * 8)
#define POS_EXP_MAX_MOVES 512
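
/* These macros expect a sched_ctx named `ctx` in scope. For example, with
 * ctx.num_waves = 7 they evaluate to a window of 350 - 7 * 35 = 105
 * instructions and at most 64 - 7 * 4 = 36 moves for SMEM, and a window of
 * 1024 - 7 * 64 = 576 instructions, at most 256 - 7 * 16 = 144 moves and a
 * clause grab distance of 7 * 8 = 56 for VMEM: the more waves are available
 * to hide latency, the less aggressively the scheduler reorders. */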

namespace aco {

enum MoveResult {
   move_success,
   move_fail_ssa,
   move_fail_rar,
   move_fail_pressure,
};

/**
 * Cursor for downwards moves, where a single instruction is moved towards
 * or below a group of instructions that the hardware can execute as a clause.
 */
struct DownwardsCursor {
   int source_idx; /* Current instruction to consider for moving */

   int insert_idx_clause; /* First clause instruction */
   int insert_idx;        /* First instruction *after* the clause */

   /* Maximum demand of all clause instructions,
    * i.e. from insert_idx_clause (inclusive) to insert_idx (exclusive) */
   RegisterDemand clause_demand;
   /* Maximum demand of instructions from source_idx to insert_idx_clause (both exclusive) */
   RegisterDemand total_demand;

   DownwardsCursor(int current_idx, RegisterDemand initial_clause_demand)
       : source_idx(current_idx - 1), insert_idx_clause(current_idx), insert_idx(current_idx + 1),
         clause_demand(initial_clause_demand)
   {}

   void verify_invariants(const RegisterDemand* register_demand);
};

/**
 * Cursor for upwards moves, where a single instruction is moved below
 * another instruction.
 */
struct UpwardsCursor {
   int source_idx; /* Current instruction to consider for moving */
   int insert_idx; /* Instruction to move in front of */

   /* Maximum demand of instructions from insert_idx (inclusive) to source_idx (exclusive) */
   RegisterDemand total_demand;

   UpwardsCursor(int source_idx_) : source_idx(source_idx_)
   {
      insert_idx = -1; /* to be initialized later */
   }

   bool has_insert_idx() const { return insert_idx != -1; }
   void verify_invariants(const RegisterDemand* register_demand);
};

struct MoveState {
   RegisterDemand max_registers;

   Block* block;
   Instruction* current;
   RegisterDemand* register_demand; /* demand per instruction */
   bool improved_rar;

   std::vector<bool> depends_on;
   /* Two are needed because, for downwards VMEM scheduling, one needs to
    * exclude the instructions in the clause, since new instructions in the
    * clause are not moved past any other instructions in the clause. */
   std::vector<bool> RAR_dependencies;
   std::vector<bool> RAR_dependencies_clause;

   /* for moving instructions before the current instruction to after it */
   DownwardsCursor downwards_init(int current_idx, bool improved_rar, bool may_form_clauses);
   MoveResult downwards_move(DownwardsCursor&, bool clause);
   void downwards_skip(DownwardsCursor&);

   /* for moving instructions after the first use of the current instruction upwards */
   UpwardsCursor upwards_init(int source_idx, bool improved_rar);
   bool upwards_check_deps(UpwardsCursor&);
   void upwards_update_insert_idx(UpwardsCursor&);
   MoveResult upwards_move(UpwardsCursor&);
   void upwards_skip(UpwardsCursor&);
};

struct sched_ctx {
   int16_t num_waves;
   int16_t last_SMEM_stall;
   int last_SMEM_dep_idx;
   MoveState mv;
   bool schedule_pos_exports = true;
   unsigned schedule_pos_export_div = 1;
};

/* This scheduler is a simple bottom-up pass based on ideas from
 * "A Novel Lightweight Instruction Scheduling Algorithm for Just-In-Time Compiler"
 * by Xiaohua Shi and Peng Guo.
 * The basic approach is to iterate over all instructions. When a memory instruction
 * is encountered, it tries to move independent instructions from above and below
 * between the memory instruction and its first user.
 * The novelty is that this scheduler cares about the current register pressure:
 * instructions will only be moved if the register pressure won't exceed a certain bound.
 */
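
/* Illustrative example (hypothetical pseudo-assembly, not from a real shader):
 * given
 *
 *    v0 = v_add_f32 ...        ; independent ALU work
 *    v1 = buffer_load_dword ...; long-latency load
 *    v2 = v_mul_f32 v1, ...    ; first user of the load
 *
 * the pass tries to produce
 *
 *    v1 = buffer_load_dword ...
 *    v0 = v_add_f32 ...
 *    v2 = v_mul_f32 v1, ...
 *
 * so that the independent instruction hides part of the load's latency. */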

template <typename T>
void
move_element(T begin_it, size_t idx, size_t before)
{
   if (idx < before) {
      auto begin = std::next(begin_it, idx);
      auto end = std::next(begin_it, before);
      std::rotate(begin, begin + 1, end);
   } else if (idx > before) {
      auto begin = std::next(begin_it, before);
      auto end = std::next(begin_it, idx + 1);
      std::rotate(begin, end - 1, end);
   }
}
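
/* Worked example (illustrative): starting from {'a', 'b', 'c', 'd', 'e'},
 *
 *    move_element(v.begin(), 4, 1); // yields {'a', 'e', 'b', 'c', 'd'}
 *
 * moves the element at index 4 up to index 1, while (again from the original
 * order)
 *
 *    move_element(v.begin(), 1, 4); // yields {'a', 'c', 'd', 'b', 'e'}
 *
 * moves the element at index 1 down to just before index 4. All other
 * elements keep their relative order. */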

void
DownwardsCursor::verify_invariants(const RegisterDemand* register_demand)
{
   assert(source_idx < insert_idx_clause);
   assert(insert_idx_clause < insert_idx);

#ifndef NDEBUG
   RegisterDemand reference_demand;
   for (int i = source_idx + 1; i < insert_idx_clause; ++i) {
      reference_demand.update(register_demand[i]);
   }
   assert(total_demand == reference_demand);

   reference_demand = {};
   for (int i = insert_idx_clause; i < insert_idx; ++i) {
      reference_demand.update(register_demand[i]);
   }
   assert(clause_demand == reference_demand);
#endif
}

DownwardsCursor
MoveState::downwards_init(int current_idx, bool improved_rar_, bool may_form_clauses)
{
   improved_rar = improved_rar_;

   std::fill(depends_on.begin(), depends_on.end(), false);
   if (improved_rar) {
      std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
      if (may_form_clauses)
         std::fill(RAR_dependencies_clause.begin(), RAR_dependencies_clause.end(), false);
   }

   for (const Operand& op : current->operands) {
      if (op.isTemp()) {
         depends_on[op.tempId()] = true;
         if (improved_rar && op.isFirstKill())
            RAR_dependencies[op.tempId()] = true;
      }
   }

   DownwardsCursor cursor(current_idx, register_demand[current_idx]);
   cursor.verify_invariants(register_demand);
   return cursor;
}

/* If add_to_clause is true, the current clause is extended by moving the
 * instruction at source_idx in front of the clause. Otherwise, the instruction
 * is moved past the end of the clause without extending it. */
MoveResult
MoveState::downwards_move(DownwardsCursor& cursor, bool add_to_clause)
{
   aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];

   for (const Definition& def : instr->definitions)
      if (def.isTemp() && depends_on[def.tempId()])
         return move_fail_ssa;

   /* check if one of the candidate's operands is killed by a depending instruction */
   std::vector<bool>& RAR_deps =
      improved_rar ? (add_to_clause ? RAR_dependencies_clause : RAR_dependencies) : depends_on;
   for (const Operand& op : instr->operands) {
      if (op.isTemp() && RAR_deps[op.tempId()]) {
         // FIXME: account for difference in register pressure
         return move_fail_rar;
      }
   }

   if (add_to_clause) {
      for (const Operand& op : instr->operands) {
         if (op.isTemp()) {
            depends_on[op.tempId()] = true;
            if (op.isFirstKill())
               RAR_dependencies[op.tempId()] = true;
         }
      }
   }

   const int dest_insert_idx = add_to_clause ? cursor.insert_idx_clause : cursor.insert_idx;
   RegisterDemand register_pressure = cursor.total_demand;
   if (!add_to_clause) {
      register_pressure.update(cursor.clause_demand);
   }

   /* Check the new demand of the instructions being moved over */
   const RegisterDemand candidate_diff = get_live_changes(instr);
   if (RegisterDemand(register_pressure - candidate_diff).exceeds(max_registers))
      return move_fail_pressure;

   /* New demand for the moved instruction */
   const RegisterDemand temp = get_temp_registers(instr);
   const RegisterDemand temp2 = get_temp_registers(block->instructions[dest_insert_idx - 1]);
   const RegisterDemand new_demand = register_demand[dest_insert_idx - 1] - temp2 + temp;
   if (new_demand.exceeds(max_registers))
      return move_fail_pressure;

   /* move the candidate below the memory load */
   move_element(block->instructions.begin(), cursor.source_idx, dest_insert_idx);

   /* update register pressure */
   move_element(register_demand, cursor.source_idx, dest_insert_idx);
   for (int i = cursor.source_idx; i < dest_insert_idx - 1; i++)
      register_demand[i] -= candidate_diff;
   register_demand[dest_insert_idx - 1] = new_demand;
   cursor.insert_idx_clause--;
   if (cursor.source_idx != cursor.insert_idx_clause) {
      /* Update demand if we moved over any instructions before the clause */
      cursor.total_demand -= candidate_diff;
   } else {
      assert(cursor.total_demand == RegisterDemand{});
   }
   if (add_to_clause) {
      cursor.clause_demand.update(new_demand);
   } else {
      cursor.clause_demand -= candidate_diff;
      cursor.insert_idx--;
   }

   cursor.source_idx--;
   cursor.verify_invariants(register_demand);
   return move_success;
}

void
MoveState::downwards_skip(DownwardsCursor& cursor)
{
   aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];

   for (const Operand& op : instr->operands) {
      if (op.isTemp()) {
         depends_on[op.tempId()] = true;
         if (improved_rar && op.isFirstKill()) {
            RAR_dependencies[op.tempId()] = true;
            RAR_dependencies_clause[op.tempId()] = true;
         }
      }
   }
   cursor.total_demand.update(register_demand[cursor.source_idx]);
   cursor.source_idx--;
   cursor.verify_invariants(register_demand);
}
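
/* The downwards cursor can be pictured like this (illustrative):
 *
 *    ... [source_idx] ...skipped instrs... [insert_idx_clause ... clause) [insert_idx] ...
 *
 * total_demand covers the open interval (source_idx, insert_idx_clause) and
 * clause_demand covers [insert_idx_clause, insert_idx), which is exactly what
 * verify_invariants() recomputes above. A successful downwards_move()
 * decrements source_idx and insert_idx_clause, since the moved instruction
 * leaves the region between them. */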

void
UpwardsCursor::verify_invariants(const RegisterDemand* register_demand)
{
#ifndef NDEBUG
   if (!has_insert_idx()) {
      return;
   }

   assert(insert_idx < source_idx);

   RegisterDemand reference_demand;
   for (int i = insert_idx; i < source_idx; ++i) {
      reference_demand.update(register_demand[i]);
   }
   assert(total_demand == reference_demand);
#endif
}

UpwardsCursor
MoveState::upwards_init(int source_idx, bool improved_rar_)
{
   improved_rar = improved_rar_;

   std::fill(depends_on.begin(), depends_on.end(), false);
   std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);

   for (const Definition& def : current->definitions) {
      if (def.isTemp())
         depends_on[def.tempId()] = true;
   }

   return UpwardsCursor(source_idx);
}

bool
MoveState::upwards_check_deps(UpwardsCursor& cursor)
{
   aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
   for (const Operand& op : instr->operands) {
      if (op.isTemp() && depends_on[op.tempId()])
         return false;
   }
   return true;
}

void
MoveState::upwards_update_insert_idx(UpwardsCursor& cursor)
{
   cursor.insert_idx = cursor.source_idx;
   cursor.total_demand = register_demand[cursor.insert_idx];
}

MoveResult
MoveState::upwards_move(UpwardsCursor& cursor)
{
   assert(cursor.has_insert_idx());

   aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
   for (const Operand& op : instr->operands) {
      if (op.isTemp() && depends_on[op.tempId()])
         return move_fail_ssa;
   }

   /* check if candidate uses/kills an operand which is used by a dependency */
   for (const Operand& op : instr->operands) {
      if (op.isTemp() && (!improved_rar || op.isFirstKill()) && RAR_dependencies[op.tempId()])
         return move_fail_rar;
   }

   /* check if register pressure is low enough: the diff is negative if register pressure is
    * decreased */
   const RegisterDemand candidate_diff = get_live_changes(instr);
   const RegisterDemand temp = get_temp_registers(instr);
   if (RegisterDemand(cursor.total_demand + candidate_diff).exceeds(max_registers))
      return move_fail_pressure;
   const RegisterDemand temp2 = get_temp_registers(block->instructions[cursor.insert_idx - 1]);
   const RegisterDemand new_demand =
      register_demand[cursor.insert_idx - 1] - temp2 + candidate_diff + temp;
   if (new_demand.exceeds(max_registers))
      return move_fail_pressure;

   /* move the candidate above the insert_idx */
   move_element(block->instructions.begin(), cursor.source_idx, cursor.insert_idx);

   /* update register pressure */
   move_element(register_demand, cursor.source_idx, cursor.insert_idx);
   register_demand[cursor.insert_idx] = new_demand;
   for (int i = cursor.insert_idx + 1; i <= cursor.source_idx; i++)
      register_demand[i] += candidate_diff;
   cursor.total_demand += candidate_diff;

   cursor.total_demand.update(register_demand[cursor.source_idx]);

   cursor.insert_idx++;
   cursor.source_idx++;

   cursor.verify_invariants(register_demand);

   return move_success;
}

void
MoveState::upwards_skip(UpwardsCursor& cursor)
{
   if (cursor.has_insert_idx()) {
      aco_ptr<Instruction>& instr = block->instructions[cursor.source_idx];
      for (const Definition& def : instr->definitions) {
         if (def.isTemp())
            depends_on[def.tempId()] = true;
      }
      for (const Operand& op : instr->operands) {
         if (op.isTemp())
            RAR_dependencies[op.tempId()] = true;
      }
      cursor.total_demand.update(register_demand[cursor.source_idx]);
   }

   cursor.source_idx++;

   cursor.verify_invariants(register_demand);
}
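
/* Example of the RAR check in upwards_move() (hypothetical temps): if an
 * already-skipped dependency reads %a, then %a is marked in RAR_dependencies,
 * and a candidate whose read of %a is a first-kill may not be moved above
 * that dependency; otherwise the recorded kill would land before a remaining
 * use of %a. */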

bool
is_gs_or_done_sendmsg(const Instruction* instr)
{
   if (instr->opcode == aco_opcode::s_sendmsg) {
      uint16_t imm = instr->sopp().imm;
      return (imm & sendmsg_id_mask) == _sendmsg_gs || (imm & sendmsg_id_mask) == _sendmsg_gs_done;
   }
   return false;
}

bool
is_done_sendmsg(const Instruction* instr)
{
   if (instr->opcode == aco_opcode::s_sendmsg)
      return (instr->sopp().imm & sendmsg_id_mask) == _sendmsg_gs_done;
   return false;
}

memory_sync_info
get_sync_info_with_hack(const Instruction* instr)
{
   memory_sync_info sync = get_sync_info(instr);
   if (instr->isSMEM() && !instr->operands.empty() && instr->operands[0].bytes() == 16) {
      // FIXME: currently, it doesn't seem beneficial to omit this due to how our scheduler works
      sync.storage = (storage_class)(sync.storage | storage_buffer);
      sync.semantics =
         (memory_semantics)((sync.semantics | semantic_private) & ~semantic_can_reorder);
   }
   return sync;
}

struct memory_event_set {
   bool has_control_barrier;

   unsigned bar_acquire;
   unsigned bar_release;
   unsigned bar_classes;

   unsigned access_acquire;
   unsigned access_release;
   unsigned access_relaxed;
   unsigned access_atomic;
};

struct hazard_query {
   bool contains_spill;
   bool contains_sendmsg;
   bool uses_exec;
   memory_event_set mem_events;
   unsigned aliasing_storage;      /* storage classes which are accessed (non-SMEM) */
   unsigned aliasing_storage_smem; /* storage classes which are accessed (SMEM) */
};

void
init_hazard_query(hazard_query* query)
{
   query->contains_spill = false;
   query->contains_sendmsg = false;
   query->uses_exec = false;
   memset(&query->mem_events, 0, sizeof(query->mem_events));
   query->aliasing_storage = 0;
   query->aliasing_storage_smem = 0;
}
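
/* The unsigned fields of memory_event_set are bitmasks of storage_class
 * values: e.g. a p_barrier with acquire semantics over shared memory sets
 * storage_shared in both bar_acquire and bar_classes (see add_memory_event()
 * below). */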

void
add_memory_event(memory_event_set* set, Instruction* instr, memory_sync_info* sync)
{
   set->has_control_barrier |= is_done_sendmsg(instr);
   if (instr->opcode == aco_opcode::p_barrier) {
      Pseudo_barrier_instruction& bar = instr->barrier();
      if (bar.sync.semantics & semantic_acquire)
         set->bar_acquire |= bar.sync.storage;
      if (bar.sync.semantics & semantic_release)
         set->bar_release |= bar.sync.storage;
      set->bar_classes |= bar.sync.storage;

      set->has_control_barrier |= bar.exec_scope > scope_invocation;
   }

   if (!sync->storage)
      return;

   if (sync->semantics & semantic_acquire)
      set->access_acquire |= sync->storage;
   if (sync->semantics & semantic_release)
      set->access_release |= sync->storage;

   if (!(sync->semantics & semantic_private)) {
      if (sync->semantics & semantic_atomic)
         set->access_atomic |= sync->storage;
      else
         set->access_relaxed |= sync->storage;
   }
}

void
add_to_hazard_query(hazard_query* query, Instruction* instr)
{
   if (instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload)
      query->contains_spill = true;
   query->contains_sendmsg |= instr->opcode == aco_opcode::s_sendmsg;
   query->uses_exec |= needs_exec_mask(instr);

   memory_sync_info sync = get_sync_info_with_hack(instr);

   add_memory_event(&query->mem_events, instr, &sync);

   if (!(sync.semantics & semantic_can_reorder)) {
      unsigned storage = sync.storage;
      /* images and buffer/global memory can alias */ // TODO: more precisely, buffer images and
                                                      // buffer/global memory can alias
      if (storage & (storage_buffer | storage_image))
         storage |= storage_buffer | storage_image;
      if (instr->isSMEM())
         query->aliasing_storage_smem |= storage;
      else
         query->aliasing_storage |= storage;
   }
}

enum HazardResult {
   hazard_success,
   hazard_fail_reorder_vmem_smem,
   hazard_fail_reorder_ds,
   hazard_fail_reorder_sendmsg,
   hazard_fail_spill,
   hazard_fail_export,
   hazard_fail_barrier,
   /* Must stop at these failures. The hazard query code doesn't consider them
    * when added. */
   hazard_fail_exec,
   hazard_fail_unreorderable,
};

HazardResult
perform_hazard_query(hazard_query* query, Instruction* instr, bool upwards)
{
   /* don't schedule discards downwards */
   if (!upwards && instr->opcode == aco_opcode::p_exit_early_if)
      return hazard_fail_unreorderable;

   if (query->uses_exec) {
      for (const Definition& def : instr->definitions) {
         if (def.isFixed() && def.physReg() == exec)
            return hazard_fail_exec;
      }
   }

   /* don't move exports so that they stay closer together */
   if (instr->isEXP())
      return hazard_fail_export;

   /* don't move non-reorderable instructions */
   if (instr->opcode == aco_opcode::s_memtime || instr->opcode == aco_opcode::s_memrealtime ||
       instr->opcode == aco_opcode::s_setprio || instr->opcode == aco_opcode::s_getreg_b32)
      return hazard_fail_unreorderable;

   memory_event_set instr_set;
   memset(&instr_set, 0, sizeof(instr_set));
   memory_sync_info sync = get_sync_info_with_hack(instr);
   add_memory_event(&instr_set, instr, &sync);

   memory_event_set* first = &instr_set;
   memory_event_set* second = &query->mem_events;
   if (upwards)
      std::swap(first, second);

   /* everything after barrier(acquire) happens after the atomics/control_barriers before
    * everything after load(acquire) happens after the load
    */
   if ((first->has_control_barrier || first->access_atomic) && second->bar_acquire)
      return hazard_fail_barrier;
   if (((first->access_acquire || first->bar_acquire) && second->bar_classes) ||
       ((first->access_acquire | first->bar_acquire) &
        (second->access_relaxed | second->access_atomic)))
      return hazard_fail_barrier;

   /* everything before barrier(release) happens before the atomics/control_barriers after
    * everything before store(release) happens before the store
    */
   if (first->bar_release && (second->has_control_barrier || second->access_atomic))
      return hazard_fail_barrier;
   if ((first->bar_classes && (second->bar_release || second->access_release)) ||
       ((first->access_relaxed | first->access_atomic) &
        (second->bar_release | second->access_release)))
      return hazard_fail_barrier;

   /* don't move memory barriers around other memory barriers */
   if (first->bar_classes && second->bar_classes)
      return hazard_fail_barrier;

   /* Don't move memory accesses to before control barriers. I don't think
    * this is necessary for the Vulkan memory model, but it might be for GLSL450. */
   unsigned control_classes =
      storage_buffer | storage_atomic_counter | storage_image | storage_shared;
   if (first->has_control_barrier &&
       ((second->access_atomic | second->access_relaxed) & control_classes))
      return hazard_fail_barrier;

   /* don't move memory loads/stores past potentially aliasing loads/stores */
   unsigned aliasing_storage =
      instr->isSMEM() ? query->aliasing_storage_smem : query->aliasing_storage;
   if ((sync.storage & aliasing_storage) && !(sync.semantics & semantic_can_reorder)) {
      unsigned intersect = sync.storage & aliasing_storage;
      if (intersect & storage_shared)
         return hazard_fail_reorder_ds;
      return hazard_fail_reorder_vmem_smem;
   }

   if ((instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload) &&
       query->contains_spill)
      return hazard_fail_spill;

   if (instr->opcode == aco_opcode::s_sendmsg && query->contains_sendmsg)
      return hazard_fail_reorder_sendmsg;

   return hazard_success;
}
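
/* The schedulers below use these helpers in one common pattern (sketch):
 *
 *    hazard_query hq;
 *    init_hazard_query(&hq);
 *    add_to_hazard_query(&hq, current);
 *    for (each candidate in the window) {
 *       if (perform_hazard_query(&hq, candidate, upwards) != hazard_success)
 *          skip the candidate (adding it to the query) or stop entirely;
 *       ...
 *    }
 *
 * Every instruction that stays between the moved instruction and its
 * destination is accumulated into the query, so each new candidate is checked
 * against the whole set at once. */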

void
schedule_SMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& register_demand,
              Instruction* current, int idx)
{
   assert(idx != 0);
   int window_size = SMEM_WINDOW_SIZE;
   int max_moves = SMEM_MAX_MOVES;
   int16_t k = 0;

   /* don't move s_memtime/s_memrealtime */
   if (current->opcode == aco_opcode::s_memtime || current->opcode == aco_opcode::s_memrealtime)
      return;

   /* first, check if we have instructions before current to move down */
   hazard_query hq;
   init_hazard_query(&hq);
   add_to_hazard_query(&hq, current);

   DownwardsCursor cursor = ctx.mv.downwards_init(idx, false, false);

   for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
        candidate_idx--) {
      assert(candidate_idx >= 0);
      assert(candidate_idx == cursor.source_idx);
      aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];

      /* break if we'd make the previous SMEM instruction stall */
      bool can_stall_prev_smem =
         idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
      if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
         break;

      /* break when encountering another MEM instruction, logical_start or barriers */
      if (candidate->opcode == aco_opcode::p_logical_start)
         break;
      /* only move VMEM instructions below descriptor loads. be more aggressive at higher num_waves
       * to help create more vmem clauses */
      if (candidate->isVMEM() && (cursor.insert_idx - cursor.source_idx > (ctx.num_waves * 4) ||
                                  current->operands[0].size() == 4))
         break;
      /* don't move descriptor loads below buffer loads */
      if (candidate->format == Format::SMEM && current->operands[0].size() == 4 &&
          candidate->operands[0].size() == 2)
         break;

      bool can_move_down = true;

      HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
      if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
          haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
          haz == hazard_fail_export)
         can_move_down = false;
      else if (haz != hazard_success)
         break;

      /* don't use LDS/GDS instructions to hide latency since it can
       * significantly worsen LDS scheduling */
      if (candidate->isDS() || !can_move_down) {
         add_to_hazard_query(&hq, candidate.get());
         ctx.mv.downwards_skip(cursor);
         continue;
      }

      MoveResult res = ctx.mv.downwards_move(cursor, false);
      if (res == move_fail_ssa || res == move_fail_rar) {
         add_to_hazard_query(&hq, candidate.get());
         ctx.mv.downwards_skip(cursor);
         continue;
      } else if (res == move_fail_pressure) {
         break;
      }

      if (candidate_idx < ctx.last_SMEM_dep_idx)
         ctx.last_SMEM_stall++;
      k++;
   }

   /* find the first instruction depending on current or find another MEM */
   UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, false);

   bool found_dependency = false;
   /* second, check if we have instructions after current to move up */
   for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int)idx + window_size;
        candidate_idx++) {
      assert(candidate_idx == up_cursor.source_idx);
      assert(candidate_idx < (int)block->instructions.size());
      aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];

      if (candidate->opcode == aco_opcode::p_logical_end)
         break;

      /* check if candidate depends on current */
      bool is_dependency = !found_dependency && !ctx.mv.upwards_check_deps(up_cursor);
      /* no need to steal from following VMEM instructions */
      if (is_dependency && candidate->isVMEM())
         break;

      if (found_dependency) {
         HazardResult haz = perform_hazard_query(&hq, candidate.get(), true);
         if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
             haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
             haz == hazard_fail_export)
            is_dependency = true;
         else if (haz != hazard_success)
            break;
      }

      if (is_dependency) {
         if (!found_dependency) {
            ctx.mv.upwards_update_insert_idx(up_cursor);
            init_hazard_query(&hq);
            found_dependency = true;
         }
      }

      if (is_dependency || !found_dependency) {
         if (found_dependency)
            add_to_hazard_query(&hq, candidate.get());
         else
            k++;
         ctx.mv.upwards_skip(up_cursor);
         continue;
      }

      MoveResult res = ctx.mv.upwards_move(up_cursor);
      if (res == move_fail_ssa || res == move_fail_rar) {
         /* no need to steal from following VMEM instructions */
         if (res == move_fail_ssa && candidate->isVMEM())
            break;
         add_to_hazard_query(&hq, candidate.get());
         ctx.mv.upwards_skip(up_cursor);
         continue;
      } else if (res == move_fail_pressure) {
         break;
      }
      k++;
   }

   ctx.last_SMEM_dep_idx = found_dependency ? up_cursor.insert_idx : 0;
   ctx.last_SMEM_stall = 10 - ctx.num_waves - k;
}
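
/* The stall tracking above is a rough latency heuristic: the counter starts
 * at 10 - num_waves - k, so additional waves and each independent instruction
 * moved between the load and its user (k) are both credited with hiding part
 * of the SMEM latency; while it is still non-negative, neither this scheduler
 * nor schedule_VMEM() moves instructions out of the region before
 * last_SMEM_dep_idx. */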

void
schedule_VMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& register_demand,
              Instruction* current, int idx)
{
   assert(idx != 0);
   int window_size = VMEM_WINDOW_SIZE;
   int max_moves = VMEM_MAX_MOVES;
   int clause_max_grab_dist = VMEM_CLAUSE_MAX_GRAB_DIST;
   int16_t k = 0;

   /* first, check if we have instructions before current to move down */
   hazard_query indep_hq;
   hazard_query clause_hq;
   init_hazard_query(&indep_hq);
   init_hazard_query(&clause_hq);
   add_to_hazard_query(&indep_hq, current);

   DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, true);

   for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
        candidate_idx--) {
      assert(candidate_idx == cursor.source_idx);
      assert(candidate_idx >= 0);
      aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
      bool is_vmem = candidate->isVMEM() || candidate->isFlatLike();

      /* break when encountering another VMEM instruction, logical_start or barriers */
      if (candidate->opcode == aco_opcode::p_logical_start)
         break;

      /* break if we'd make the previous SMEM instruction stall */
      bool can_stall_prev_smem =
         idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
      if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
         break;

      bool part_of_clause = false;
      if (current->isVMEM() == candidate->isVMEM()) {
         int grab_dist = cursor.insert_idx_clause - candidate_idx;
         /* We can't easily tell how much this will decrease the def-to-use
          * distances, so just use how far it will be moved as a heuristic. */
         part_of_clause =
            grab_dist < clause_max_grab_dist && should_form_clause(current, candidate.get());
      }

      /* if current depends on candidate, add additional dependencies and continue */
      bool can_move_down = !is_vmem || part_of_clause;

      HazardResult haz =
         perform_hazard_query(part_of_clause ? &clause_hq : &indep_hq, candidate.get(), false);
      if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
          haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
          haz == hazard_fail_export)
         can_move_down = false;
      else if (haz != hazard_success)
         break;

      if (!can_move_down) {
         add_to_hazard_query(&indep_hq, candidate.get());
         add_to_hazard_query(&clause_hq, candidate.get());
         ctx.mv.downwards_skip(cursor);
         continue;
      }

      Instruction* candidate_ptr = candidate.get();
      MoveResult res = ctx.mv.downwards_move(cursor, part_of_clause);
      if (res == move_fail_ssa || res == move_fail_rar) {
         add_to_hazard_query(&indep_hq, candidate.get());
         add_to_hazard_query(&clause_hq, candidate.get());
         ctx.mv.downwards_skip(cursor);
         continue;
      } else if (res == move_fail_pressure) {
         break;
      }
      if (part_of_clause)
         add_to_hazard_query(&indep_hq, candidate_ptr);
      else
         k++;
      if (candidate_idx < ctx.last_SMEM_dep_idx)
         ctx.last_SMEM_stall++;
   }

   /* find the first instruction depending on current or find another VMEM */
   UpwardsCursor up_cursor = ctx.mv.upwards_init(idx + 1, true);

   bool found_dependency = false;
   /* second, check if we have instructions after current to move up */
   for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int)idx + window_size;
        candidate_idx++) {
      assert(candidate_idx == up_cursor.source_idx);
      assert(candidate_idx < (int)block->instructions.size());
      aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
      bool is_vmem = candidate->isVMEM() || candidate->isFlatLike();

      if (candidate->opcode == aco_opcode::p_logical_end)
         break;

      /* check if candidate depends on current */
      bool is_dependency = false;
      if (found_dependency) {
         HazardResult haz = perform_hazard_query(&indep_hq, candidate.get(), true);
         if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
             haz == hazard_fail_reorder_vmem_smem || haz == hazard_fail_reorder_sendmsg ||
             haz == hazard_fail_barrier || haz == hazard_fail_export)
            is_dependency = true;
         else if (haz != hazard_success)
            break;
      }

      is_dependency |= !found_dependency && !ctx.mv.upwards_check_deps(up_cursor);
      if (is_dependency) {
         if (!found_dependency) {
            ctx.mv.upwards_update_insert_idx(up_cursor);
            init_hazard_query(&indep_hq);
            found_dependency = true;
         }
      } else if (is_vmem) {
         /* don't move up dependencies of other VMEM instructions */
         for (const Definition& def : candidate->definitions) {
            if (def.isTemp())
               ctx.mv.depends_on[def.tempId()] = true;
         }
      }

      if (is_dependency || !found_dependency) {
         if (found_dependency)
            add_to_hazard_query(&indep_hq, candidate.get());
         else
            k++;
         ctx.mv.upwards_skip(up_cursor);
         continue;
      }

      MoveResult res = ctx.mv.upwards_move(up_cursor);
      if (res == move_fail_ssa || res == move_fail_rar) {
         add_to_hazard_query(&indep_hq, candidate.get());
         ctx.mv.upwards_skip(up_cursor);
         continue;
      } else if (res == move_fail_pressure) {
         break;
      }
      k++;
   }
}
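
/* Clause formation example (illustrative): two loads such as
 *
 *    v0 = buffer_load_dword s[0:3], ...
 *    v1 = buffer_load_dword s[0:3], ...
 *
 * can be placed back-to-back so the hardware can issue them as one clause;
 * should_form_clause() decides whether the current and candidate instruction
 * qualify, and VMEM_CLAUSE_MAX_GRAB_DIST bounds how far a candidate may be
 * pulled down for this, since growing a clause also shrinks def-use
 * distances. */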

void
schedule_position_export(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& register_demand,
                         Instruction* current, int idx)
{
   assert(idx != 0);
   int window_size = POS_EXP_WINDOW_SIZE / ctx.schedule_pos_export_div;
   int max_moves = POS_EXP_MAX_MOVES / ctx.schedule_pos_export_div;
   int16_t k = 0;

   DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, false);

   hazard_query hq;
   init_hazard_query(&hq);
   add_to_hazard_query(&hq, current);

   for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
        candidate_idx--) {
      assert(candidate_idx >= 0);
      aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];

      if (candidate->opcode == aco_opcode::p_logical_start)
         break;
      if (candidate->isVMEM() || candidate->isSMEM() || candidate->isFlatLike())
         break;

      HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
      if (haz == hazard_fail_exec || haz == hazard_fail_unreorderable)
         break;

      if (haz != hazard_success) {
         add_to_hazard_query(&hq, candidate.get());
         ctx.mv.downwards_skip(cursor);
         continue;
      }

      MoveResult res = ctx.mv.downwards_move(cursor, false);
      if (res == move_fail_ssa || res == move_fail_rar) {
         add_to_hazard_query(&hq, candidate.get());
         ctx.mv.downwards_skip(cursor);
         continue;
      } else if (res == move_fail_pressure) {
         break;
      }
      k++;
   }
}

void
schedule_block(sched_ctx& ctx, Program* program, Block* block, live& live_vars)
{
   ctx.last_SMEM_dep_idx = 0;
   ctx.last_SMEM_stall = INT16_MIN;
   ctx.mv.block = block;
   ctx.mv.register_demand = live_vars.register_demand[block->index].data();

   /* go through all instructions and find memory loads */
   for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
      Instruction* current = block->instructions[idx].get();

      if (block->kind & block_kind_export_end && current->isEXP() && ctx.schedule_pos_exports) {
         unsigned target = current->exp().dest;
         if (target >= V_008DFC_SQ_EXP_POS && target < V_008DFC_SQ_EXP_PRIM) {
            ctx.mv.current = current;
            schedule_position_export(ctx, block, live_vars.register_demand[block->index], current,
                                     idx);
         }
      }

      if (current->definitions.empty())
         continue;

      if (current->isVMEM() || current->isFlatLike()) {
         ctx.mv.current = current;
         schedule_VMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
      }

      if (current->isSMEM()) {
         ctx.mv.current = current;
         schedule_SMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
      }
   }

   /* resummarize the block's register demand */
   block->register_demand = RegisterDemand();
   for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
      block->register_demand.update(live_vars.register_demand[block->index][idx]);
   }
}
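
/* Example of the wave heuristic in schedule_program() below: on a pre-GFX10
 * device where program->dev.physical_vgprs == 256, wave_fac == 1, so a shader
 * with program->num_waves == 8 and demand.vgpr == 26 falls into the
 * "demand.vgpr >= 25" bucket and the scheduler may raise register usage up to
 * what 6 waves allow. */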

void
schedule_program(Program* program, live& live_vars)
{
   /* don't use program->max_reg_demand because that is affected by max_waves_per_simd */
   RegisterDemand demand;
   for (Block& block : program->blocks)
      demand.update(block.register_demand);
   demand.vgpr += program->config->num_shared_vgprs / 2;

   sched_ctx ctx;
   ctx.mv.depends_on.resize(program->peekAllocationId());
   ctx.mv.RAR_dependencies.resize(program->peekAllocationId());
   ctx.mv.RAR_dependencies_clause.resize(program->peekAllocationId());
   /* Allowing the scheduler to reduce the number of waves to as low as 5
    * improves performance of Thrones of Britannia significantly and doesn't
    * seem to hurt anything else. */
   // TODO: account for possible uneven num_waves on GFX10+
   unsigned wave_fac = program->dev.physical_vgprs / 256;
   if (program->num_waves <= 5 * wave_fac)
      ctx.num_waves = program->num_waves;
   else if (demand.vgpr >= 29)
      ctx.num_waves = 5 * wave_fac;
   else if (demand.vgpr >= 25)
      ctx.num_waves = 6 * wave_fac;
   else
      ctx.num_waves = 7 * wave_fac;
   ctx.num_waves = std::max<uint16_t>(ctx.num_waves, program->min_waves);
   ctx.num_waves = std::min<uint16_t>(ctx.num_waves, program->num_waves);

   /* VMEM_MAX_MOVES and such assume pre-GFX10 wave count */
   ctx.num_waves = std::max<uint16_t>(ctx.num_waves / wave_fac, 1);

   assert(ctx.num_waves > 0);
   ctx.mv.max_registers = {int16_t(get_addr_vgpr_from_waves(program, ctx.num_waves * wave_fac) - 2),
                           int16_t(get_addr_sgpr_from_waves(program, ctx.num_waves * wave_fac))};

   /* NGG culling shaders are very sensitive to position export scheduling.
    * Schedule less aggressively when early primitive export is used, and
    * keep the position export at the very bottom when late primitive export is used.
    */
   if (program->info->has_ngg_culling && program->stage.num_sw_stages() == 1) {
      if (!program->info->has_ngg_early_prim_export)
         ctx.schedule_pos_exports = false;
      else
         ctx.schedule_pos_export_div = 4;
   }

   for (Block& block : program->blocks)
      schedule_block(ctx, program, &block, live_vars);

   /* update max_reg_demand and num_waves */
   RegisterDemand new_demand;
   for (Block& block : program->blocks) {
      new_demand.update(block.register_demand);
   }
   update_vgpr_sgpr_demand(program, new_demand);

   /* if enabled, this code asserts that register_demand is updated correctly */
#if 0
   int prev_num_waves = program->num_waves;
   const RegisterDemand prev_max_demand = program->max_reg_demand;

   std::vector<RegisterDemand> demands(program->blocks.size());
   for (unsigned j = 0; j < program->blocks.size(); j++) {
      demands[j] = program->blocks[j].register_demand;
   }

   live live_vars2 = aco::live_var_analysis(program);

   for (unsigned j = 0; j < program->blocks.size(); j++) {
      Block& b = program->blocks[j];
      for (unsigned i = 0; i < b.instructions.size(); i++)
         assert(live_vars.register_demand[b.index][i] == live_vars2.register_demand[b.index][i]);
      assert(b.register_demand == demands[j]);
   }

   assert(program->max_reg_demand == prev_max_demand);
   assert(program->num_waves == prev_num_waves);
#endif
}

} // namespace aco