CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!
Path: blob/master/Core/MIPS/IR/IRRegCache.cpp
Views: 1401
// Copyright (c) 2023- PPSSPP Project.12// This program is free software: you can redistribute it and/or modify3// it under the terms of the GNU General Public License as published by4// the Free Software Foundation, version 2.0 or later versions.56// This program is distributed in the hope that it will be useful,7// but WITHOUT ANY WARRANTY; without even the implied warranty of8// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the9// GNU General Public License 2.0 for more details.1011// A copy of the GPL 2.0 should have been included with the program.12// If not, see http://www.gnu.org/licenses/1314// Official git repository and contact information can be found at15// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.1617#ifndef offsetof18#include <cstddef>19#endif2021#include <cstring>22#include "Common/Log.h"23#include "Common/LogReporting.h"24#include "Core/MemMap.h"25#include "Core/MIPS/IR/IRAnalysis.h"26#include "Core/MIPS/IR/IRRegCache.h"27#include "Core/MIPS/IR/IRInst.h"28#include "Core/MIPS/IR/IRJit.h"29#include "Core/MIPS/JitCommon/JitState.h"3031void IRImmRegCache::Flush(IRReg rd) {32if (isImm_[rd]) {33if (rd == 0) {34return;35}36_assert_((rd > 0 && rd < 32) || (rd >= IRTEMP_0 && rd < IRREG_VFPU_CTRL_BASE));37ir_->WriteSetConstant(rd, immVal_[rd]);38isImm_[rd] = false;39}40}4142void IRImmRegCache::Discard(IRReg rd) {43if (rd == 0) {44return;45}46isImm_[rd] = false;47}4849IRImmRegCache::IRImmRegCache(IRWriter *ir) : ir_(ir) {50memset(&isImm_, 0, sizeof(isImm_));51memset(&immVal_, 0, sizeof(immVal_));52isImm_[0] = true;53ir_ = ir;54}5556void IRImmRegCache::FlushAll() {57for (int i = 1; i < TOTAL_MAPPABLE_IRREGS; ) {58if (isImm_[i]) {59Flush(i);60}6162// Most of the time, lots are not. 
This speeds it up a lot.63bool *next = (bool *)memchr(&isImm_[i], 1, TOTAL_MAPPABLE_IRREGS - i);64if (!next)65break;66i = (int)(next - &isImm_[0]);67}68}6970void IRImmRegCache::MapIn(IRReg rd) {71Flush(rd);72}7374void IRImmRegCache::MapDirty(IRReg rd) {75Discard(rd);76}7778void IRImmRegCache::MapInIn(IRReg rs, IRReg rt) {79Flush(rs);80Flush(rt);81}8283void IRImmRegCache::MapInInIn(IRReg rd, IRReg rs, IRReg rt) {84Flush(rd);85Flush(rs);86Flush(rt);87}8889void IRImmRegCache::MapDirtyIn(IRReg rd, IRReg rs) {90if (rs != rd) {91Discard(rd);92}93Flush(rs);94}9596void IRImmRegCache::MapDirtyInIn(IRReg rd, IRReg rs, IRReg rt) {97if (rs != rd && rt != rd) {98Discard(rd);99}100Flush(rs);101Flush(rt);102}103104IRNativeRegCacheBase::IRNativeRegCacheBase(MIPSComp::JitOptions *jo)105: jo_(jo) {}106107void IRNativeRegCacheBase::Start(MIPSComp::IRBlockCache *irBlockCache, int blockNum) {108if (!initialReady_) {109SetupInitialRegs();110initialReady_ = true;111}112113memcpy(nr, nrInitial_, sizeof(nr[0]) * config_.totalNativeRegs);114memcpy(mr, mrInitial_, sizeof(mr));115116irBlock_ = irBlockCache->GetBlock(blockNum);117118int numStatics;119const StaticAllocation *statics = GetStaticAllocations(numStatics);120for (int i = 0; i < numStatics; i++) {121nr[statics[i].nr].mipsReg = statics[i].mr;122nr[statics[i].nr].pointerified = statics[i].pointerified && jo_->enablePointerify;123nr[statics[i].nr].normalized32 = statics[i].normalized32;124mr[statics[i].mr].loc = statics[i].loc;125mr[statics[i].mr].nReg = statics[i].nr;126mr[statics[i].mr].isStatic = true;127// Lock it until the very end.128mr[statics[i].mr].spillLockIRIndex = irBlock_->GetNumIRInstructions();129}130131irBlockNum_ = blockNum;132irBlockCache_ = irBlockCache;133irIndex_ = 0;134}135136void IRNativeRegCacheBase::SetupInitialRegs() {137_assert_msg_(config_.totalNativeRegs > 0, "totalNativeRegs was never set by backend");138139// Everything else is initialized in the struct.140mrInitial_[MIPS_REG_ZERO].loc = 
MIPSLoc::IMM;141mrInitial_[MIPS_REG_ZERO].imm = 0;142}143144bool IRNativeRegCacheBase::IsGPRInRAM(IRReg gpr) {145_dbg_assert_(IsValidGPR(gpr));146return mr[gpr].loc == MIPSLoc::MEM;147}148149bool IRNativeRegCacheBase::IsFPRInRAM(IRReg fpr) {150_dbg_assert_(IsValidFPR(fpr));151return mr[fpr + 32].loc == MIPSLoc::MEM;152}153154bool IRNativeRegCacheBase::IsGPRMapped(IRReg gpr) {155_dbg_assert_(IsValidGPR(gpr));156return mr[gpr].loc == MIPSLoc::REG || mr[gpr].loc == MIPSLoc::REG_IMM;157}158159bool IRNativeRegCacheBase::IsFPRMapped(IRReg fpr) {160_dbg_assert_(IsValidFPR(fpr));161return mr[fpr + 32].loc == MIPSLoc::FREG || mr[fpr + 32].loc == MIPSLoc::VREG;162}163164int IRNativeRegCacheBase::GetFPRLaneCount(IRReg fpr) {165if (!IsFPRMapped(fpr))166return 0;167if (mr[fpr + 32].lane == -1)168return 1;169170IRReg base = fpr + 32 - mr[fpr + 32].lane;171int c = 1;172for (int i = 1; i < 4; ++i) {173if (mr[base + i].nReg != mr[base].nReg || mr[base + i].loc != mr[base].loc)174return c;175if (mr[base + i].lane != i)176return c;177178c++;179}180181return c;182}183184int IRNativeRegCacheBase::GetFPRLane(IRReg fpr) {185_dbg_assert_(IsValidFPR(fpr));186if (mr[fpr + 32].loc == MIPSLoc::FREG || mr[fpr + 32].loc == MIPSLoc::VREG) {187int l = mr[fpr + 32].lane;188return l == -1 ? 
0 : l;189}190return -1;191}192193bool IRNativeRegCacheBase::IsGPRMappedAsPointer(IRReg gpr) {194_dbg_assert_(IsValidGPR(gpr));195if (mr[gpr].loc == MIPSLoc::REG) {196return nr[mr[gpr].nReg].pointerified;197} else if (mr[gpr].loc == MIPSLoc::REG_IMM) {198_assert_msg_(!nr[mr[gpr].nReg].pointerified, "Really shouldn't be pointerified here");199} else if (mr[gpr].loc == MIPSLoc::REG_AS_PTR) {200return true;201}202return false;203}204205bool IRNativeRegCacheBase::IsGPRMappedAsStaticPointer(IRReg gpr) {206if (IsGPRMappedAsPointer(gpr)) {207return mr[gpr].isStatic;208}209return false;210}211212bool IRNativeRegCacheBase::IsGPRImm(IRReg gpr) {213_dbg_assert_(IsValidGPR(gpr));214if (gpr == MIPS_REG_ZERO)215return true;216return mr[gpr].loc == MIPSLoc::IMM || mr[gpr].loc == MIPSLoc::REG_IMM;217}218219bool IRNativeRegCacheBase::IsGPR2Imm(IRReg base) {220return IsGPRImm(base) && IsGPRImm(base + 1);221}222223uint32_t IRNativeRegCacheBase::GetGPRImm(IRReg gpr) {224_dbg_assert_(IsValidGPR(gpr));225if (gpr == MIPS_REG_ZERO)226return 0;227if (mr[gpr].loc != MIPSLoc::IMM && mr[gpr].loc != MIPSLoc::REG_IMM) {228_assert_msg_(mr[gpr].loc == MIPSLoc::IMM || mr[gpr].loc == MIPSLoc::REG_IMM, "GPR %d not in an imm", gpr);229}230return mr[gpr].imm;231}232233uint64_t IRNativeRegCacheBase::GetGPR2Imm(IRReg base) {234return (uint64_t)GetGPRImm(base) | ((uint64_t)GetGPRImm(base + 1) << 32);235}236237void IRNativeRegCacheBase::SetGPRImm(IRReg gpr, uint32_t immVal) {238_dbg_assert_(IsValidGPR(gpr));239if (gpr == MIPS_REG_ZERO && immVal != 0) {240ERROR_LOG_REPORT(Log::JIT, "Trying to set immediate %08x to r0", immVal);241return;242}243244if (mr[gpr].loc == MIPSLoc::REG_IMM && mr[gpr].imm == immVal) {245// Already have that value, let's keep it in the reg.246return;247}248249if (mr[gpr].nReg != -1) {250// Zap existing value if cached in a reg.251_assert_msg_(mr[gpr].lane == -1, "Should not be a multilane reg");252DiscardNativeReg(mr[gpr].nReg);253}254255mr[gpr].loc = MIPSLoc::IMM;256mr[gpr].imm = 
immVal;257}258259void IRNativeRegCacheBase::SetGPR2Imm(IRReg base, uint64_t immVal) {260_dbg_assert_(IsValidGPRNoZero(base));261uint32_t imm0 = (uint32_t)(immVal & 0xFFFFFFFF);262uint32_t imm1 = (uint32_t)(immVal >> 32);263264if (IsGPRImm(base) && IsGPRImm(base + 1) && GetGPRImm(base) == imm0 && GetGPRImm(base + 1) == imm1) {265// Already set to this, don't bother.266return;267}268269if (mr[base].nReg != -1) {270// Zap existing value if cached in a reg.271DiscardNativeReg(mr[base].nReg);272if (mr[base + 1].nReg != -1)273DiscardNativeReg(mr[base + 1].nReg);274}275276mr[base].loc = MIPSLoc::IMM;277mr[base].imm = imm0;278mr[base + 1].loc = MIPSLoc::IMM;279mr[base + 1].imm = imm1;280}281282void IRNativeRegCacheBase::SpillLockGPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {283_dbg_assert_(IsValidGPR(r1));284_dbg_assert_(r2 == IRREG_INVALID || IsValidGPR(r2));285_dbg_assert_(r3 == IRREG_INVALID || IsValidGPR(r3));286_dbg_assert_(r4 == IRREG_INVALID || IsValidGPR(r4));287SetSpillLockIRIndex(r1, r2, r3, r4, 0, irIndex_);288}289290void IRNativeRegCacheBase::SpillLockFPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {291_dbg_assert_(IsValidFPR(r1));292_dbg_assert_(r2 == IRREG_INVALID || IsValidFPR(r2));293_dbg_assert_(r3 == IRREG_INVALID || IsValidFPR(r3));294_dbg_assert_(r4 == IRREG_INVALID || IsValidFPR(r4));295SetSpillLockIRIndex(r1, r2, r3, r4, 32, irIndex_);296}297298void IRNativeRegCacheBase::ReleaseSpillLockGPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {299_dbg_assert_(IsValidGPR(r1));300_dbg_assert_(r2 == IRREG_INVALID || IsValidGPR(r2));301_dbg_assert_(r3 == IRREG_INVALID || IsValidGPR(r3));302_dbg_assert_(r4 == IRREG_INVALID || IsValidGPR(r4));303SetSpillLockIRIndex(r1, r2, r3, r4, 0, -1);304}305306void IRNativeRegCacheBase::ReleaseSpillLockFPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {307_dbg_assert_(IsValidFPR(r1));308_dbg_assert_(r2 == IRREG_INVALID || IsValidFPR(r2));309_dbg_assert_(r3 == IRREG_INVALID || IsValidFPR(r3));310_dbg_assert_(r4 == IRREG_INVALID || 
IsValidFPR(r4));311SetSpillLockIRIndex(r1, r2, r3, r4, 32, -1);312}313314void IRNativeRegCacheBase::SetSpillLockIRIndex(IRReg r1, IRReg r2, IRReg r3, IRReg r4, int offset, int index) {315if (!mr[r1 + offset].isStatic)316mr[r1 + offset].spillLockIRIndex = index;317if (r2 != IRREG_INVALID && !mr[r2 + offset].isStatic)318mr[r2 + offset].spillLockIRIndex = index;319if (r3 != IRREG_INVALID && !mr[r3 + offset].isStatic)320mr[r3 + offset].spillLockIRIndex = index;321if (r4 != IRREG_INVALID && !mr[r4 + offset].isStatic)322mr[r4 + offset].spillLockIRIndex = index;323}324325void IRNativeRegCacheBase::SetSpillLockIRIndex(IRReg r1, int index) {326if (!mr[r1].isStatic)327mr[r1].spillLockIRIndex = index;328}329330void IRNativeRegCacheBase::MarkGPRDirty(IRReg gpr, bool andNormalized32) {331_assert_(IsGPRMapped(gpr));332if (!IsGPRMapped(gpr))333return;334335IRNativeReg nreg = mr[gpr].nReg;336nr[nreg].isDirty = true;337nr[nreg].normalized32 = andNormalized32;338// If reg is written to, pointerification is assumed lost.339nr[nreg].pointerified = false;340if (mr[gpr].loc == MIPSLoc::REG_AS_PTR || mr[gpr].loc == MIPSLoc::REG_IMM) {341mr[gpr].loc = MIPSLoc::REG;342mr[gpr].imm = -1;343}344_dbg_assert_(mr[gpr].loc == MIPSLoc::REG);345}346347void IRNativeRegCacheBase::MarkGPRAsPointerDirty(IRReg gpr) {348_assert_(IsGPRMappedAsPointer(gpr));349if (!IsGPRMappedAsPointer(gpr))350return;351352#ifdef MASKED_PSP_MEMORY353if (mr[gpr].loc == MIPSLoc::REG_AS_PTR) {354_assert_msg_(false, "MarkGPRAsPointerDirty is not possible when using MASKED_PSP_MEMORY");355}356#endif357358IRNativeReg nreg = mr[gpr].nReg;359_dbg_assert_(!nr[nreg].normalized32);360nr[nreg].isDirty = true;361// Stays pointerified or REG_AS_PTR.362}363364IRNativeReg IRNativeRegCacheBase::AllocateReg(MIPSLoc type, MIPSMap flags) {365_dbg_assert_(type == MIPSLoc::REG || type == MIPSLoc::FREG || type == MIPSLoc::VREG);366367IRNativeReg nreg = FindFreeReg(type, flags);368if (nreg != -1)369return nreg;370371// Still nothing. 
Let's spill a reg and goto 10.372bool clobbered;373IRNativeReg bestToSpill = FindBestToSpill(type, flags, true, &clobbered);374if (bestToSpill == -1) {375bestToSpill = FindBestToSpill(type, flags, false, &clobbered);376}377378if (bestToSpill != -1) {379if (clobbered) {380DiscardNativeReg(bestToSpill);381} else {382FlushNativeReg(bestToSpill);383}384// Now one must be free.385return FindFreeReg(type, flags);386}387388// Uh oh, we have all of them spilllocked....389ERROR_LOG_REPORT(Log::JIT, "Out of spillable registers in block PC %08x, index %d", irBlock_->GetOriginalStart(), irIndex_);390_assert_(bestToSpill != -1);391return -1;392}393394IRNativeReg IRNativeRegCacheBase::FindFreeReg(MIPSLoc type, MIPSMap flags) const {395int allocCount = 0, base = 0;396const int *allocOrder = GetAllocationOrder(type, flags, allocCount, base);397398for (int i = 0; i < allocCount; i++) {399IRNativeReg nreg = IRNativeReg(allocOrder[i] - base);400401if (nr[nreg].mipsReg == IRREG_INVALID && nr[nreg].tempLockIRIndex < irIndex_) {402return nreg;403}404}405406return -1;407}408409bool IRNativeRegCacheBase::IsGPRClobbered(IRReg gpr) const {410_dbg_assert_(IsValidGPR(gpr));411return IsRegClobbered(MIPSLoc::REG, gpr);412}413414bool IRNativeRegCacheBase::IsFPRClobbered(IRReg fpr) const {415_dbg_assert_(IsValidFPR(fpr));416return IsRegClobbered(MIPSLoc::FREG, fpr + 32);417}418419IRUsage IRNativeRegCacheBase::GetNextRegUsage(const IRSituation &info, MIPSLoc type, IRReg r) const {420if (type == MIPSLoc::REG)421return IRNextGPRUsage(r, info);422else if (type == MIPSLoc::FREG || type == MIPSLoc::VREG)423return IRNextFPRUsage(r - 32, info);424_assert_msg_(false, "Unknown spill allocation type");425return IRUsage::UNKNOWN;426}427428bool IRNativeRegCacheBase::IsRegClobbered(MIPSLoc type, IRReg r) const {429static const int UNUSED_LOOKAHEAD_OPS = 30;430431IRSituation info;432info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;433// We look starting one ahead, unlike spilling. 
We want to know if it clobbers later.434info.currentIndex = irIndex_ + 1;435info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);436info.numInstructions = irBlock_->GetNumIRInstructions();437438// Make sure we're on the first one if this is multi-lane.439IRReg first = r;440if (mr[r].lane != -1)441first -= mr[r].lane;442443IRUsage usage = GetNextRegUsage(info, type, first);444if (usage == IRUsage::CLOBBERED) {445// If multiple mips regs use this native reg (i.e. vector, HI/LO), check each.446bool canClobber = true;447for (IRReg m = first + 1; mr[m].nReg == mr[first].nReg && m < IRREG_INVALID && canClobber; ++m)448canClobber = GetNextRegUsage(info, type, m) == IRUsage::CLOBBERED;449450return canClobber;451}452return false;453}454455bool IRNativeRegCacheBase::IsRegRead(MIPSLoc type, IRReg first) const {456static const int UNUSED_LOOKAHEAD_OPS = 30;457458IRSituation info;459info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;460// We look starting one ahead, unlike spilling.461info.currentIndex = irIndex_ + 1;462info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);463info.numInstructions = irBlock_->GetNumIRInstructions();464465// Note: this intentionally doesn't look at the full reg, only the lane.466IRUsage usage = GetNextRegUsage(info, type, first);467return usage == IRUsage::READ;468}469470IRNativeReg IRNativeRegCacheBase::FindBestToSpill(MIPSLoc type, MIPSMap flags, bool unusedOnly, bool *clobbered) const {471int allocCount = 0, base = 0;472const int *allocOrder = GetAllocationOrder(type, flags, allocCount, base);473474static const int UNUSED_LOOKAHEAD_OPS = 30;475476IRSituation info;477info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;478info.currentIndex = irIndex_;479info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);480info.numInstructions = irBlock_->GetNumIRInstructions();481482*clobbered = false;483for (int i = 0; i < allocCount; i++) {484IRNativeReg nreg = IRNativeReg(allocOrder[i] - base);485if (nr[nreg].mipsReg != 
IRREG_INVALID && mr[nr[nreg].mipsReg].spillLockIRIndex >= irIndex_)486continue;487if (nr[nreg].tempLockIRIndex >= irIndex_)488continue;489490// As it's in alloc-order, we know it's not static so we don't need to check for that.491IRReg mipsReg = nr[nreg].mipsReg;492IRUsage usage = GetNextRegUsage(info, type, mipsReg);493494// Awesome, a clobbered reg. Let's use it?495if (usage == IRUsage::CLOBBERED) {496// If multiple mips regs use this native reg (i.e. vector, HI/LO), check each.497// Note: mipsReg points to the lowest numbered IRReg.498bool canClobber = true;499for (IRReg m = mipsReg + 1; mr[m].nReg == nreg && m < IRREG_INVALID && canClobber; ++m)500canClobber = GetNextRegUsage(info, type, m) == IRUsage::CLOBBERED;501502// Okay, if all can be clobbered, we're good to go.503if (canClobber) {504*clobbered = true;505return nreg;506}507}508509// Not awesome. A used reg. Let's try to avoid spilling.510if (!unusedOnly || usage == IRUsage::UNUSED) {511// TODO: Use age or something to choose which register to spill?512// TODO: Spill dirty regs first? 
or opposite?513*clobbered = mipsReg == MIPS_REG_ZERO;514return nreg;515}516}517518return -1;519}520521bool IRNativeRegCacheBase::IsNativeRegCompatible(IRNativeReg nreg, MIPSLoc type, MIPSMap flags, int lanes) {522int allocCount = 0, base = 0;523const int *allocOrder = GetAllocationOrder(type, flags, allocCount, base);524525for (int i = 0; i < allocCount; i++) {526IRNativeReg allocReg = IRNativeReg(allocOrder[i] - base);527if (allocReg == nreg)528return true;529}530531return false;532}533534bool IRNativeRegCacheBase::TransferNativeReg(IRNativeReg nreg, IRNativeReg dest, MIPSLoc type, IRReg first, int lanes, MIPSMap flags) {535// To be overridden if the backend supports transfers.536return false;537}538539void IRNativeRegCacheBase::DiscardNativeReg(IRNativeReg nreg) {540_assert_msg_(nreg >= 0 && nreg < config_.totalNativeRegs, "DiscardNativeReg on invalid register %d", nreg);541if (nr[nreg].mipsReg != IRREG_INVALID) {542int8_t lanes = 0;543for (IRReg m = nr[nreg].mipsReg; mr[m].nReg == nreg && m < IRREG_INVALID; ++m)544lanes++;545546if (mr[nr[nreg].mipsReg].isStatic) {547_assert_(nr[nreg].mipsReg != MIPS_REG_ZERO);548549int numStatics;550const StaticAllocation *statics = GetStaticAllocations(numStatics);551552// If it's not currently marked as in a reg, throw it away.553for (IRReg m = nr[nreg].mipsReg; m < nr[nreg].mipsReg + lanes; ++m) {554_assert_msg_(mr[m].isStatic, "Reg in lane %d mismatched static status", m - nr[nreg].mipsReg);555for (int i = 0; i < numStatics; i++) {556if (m == statics[i].mr)557mr[m].loc = statics[i].loc;558}559}560} else {561for (IRReg m = nr[nreg].mipsReg; m < nr[nreg].mipsReg + lanes; ++m) {562mr[m].loc = MIPSLoc::MEM;563mr[m].nReg = -1;564mr[m].imm = 0;565mr[m].lane = -1;566_assert_msg_(!mr[m].isStatic, "Reg in lane %d mismatched static status", m - nr[nreg].mipsReg);567}568569nr[nreg].mipsReg = IRREG_INVALID;570}571}572573// Even for a static reg, we assume this means it's not pointerified anymore.574nr[nreg].pointerified = 
false;575nr[nreg].isDirty = false;576nr[nreg].normalized32 = false;577}578579void IRNativeRegCacheBase::FlushNativeReg(IRNativeReg nreg) {580_assert_msg_(nreg >= 0 && nreg < config_.totalNativeRegs, "FlushNativeReg on invalid register %d", nreg);581if (nr[nreg].mipsReg == IRREG_INVALID || nr[nreg].mipsReg == MIPS_REG_ZERO) {582// Nothing to do, reg not mapped or mapped to fixed zero.583_dbg_assert_(!nr[nreg].isDirty);584return;585}586_dbg_assert_(!mr[nr[nreg].mipsReg].isStatic);587if (mr[nr[nreg].mipsReg].isStatic) {588ERROR_LOG(Log::JIT, "Cannot FlushNativeReg a statically mapped register");589return;590}591592// Multiple mipsRegs may match this if a vector or HI/LO, etc.593bool isDirty = nr[nreg].isDirty;594int8_t lanes = 0;595for (IRReg m = nr[nreg].mipsReg; mr[m].nReg == nreg && m < IRREG_INVALID; ++m) {596_assert_(!mr[m].isStatic);597// If we're flushing a native reg, better not be partially in mem or an imm.598_assert_(mr[m].loc != MIPSLoc::MEM && mr[m].loc != MIPSLoc::IMM);599lanes++;600}601602if (isDirty) {603IRReg first = nr[nreg].mipsReg;604if (mr[first].loc == MIPSLoc::REG_AS_PTR) {605// We assume this can't be multiple lanes. 
Maybe some gather craziness?606_assert_(lanes == 1);607AdjustNativeRegAsPtr(nreg, false);608mr[first].loc = MIPSLoc::REG;609}610StoreNativeReg(nreg, first, lanes);611}612613for (int8_t i = 0; i < lanes; ++i) {614auto &mreg = mr[nr[nreg].mipsReg + i];615mreg.nReg = -1;616// Note that it loses its imm status, because imms are always dirty.617mreg.loc = MIPSLoc::MEM;618mreg.imm = 0;619mreg.lane = -1;620}621622nr[nreg].mipsReg = IRREG_INVALID;623nr[nreg].isDirty = false;624nr[nreg].pointerified = false;625nr[nreg].normalized32 = false;626}627628void IRNativeRegCacheBase::DiscardReg(IRReg mreg) {629if (mr[mreg].isStatic) {630DiscardNativeReg(mr[mreg].nReg);631return;632}633switch (mr[mreg].loc) {634case MIPSLoc::IMM:635if (mreg != MIPS_REG_ZERO) {636mr[mreg].loc = MIPSLoc::MEM;637mr[mreg].imm = 0;638}639break;640641case MIPSLoc::REG:642case MIPSLoc::REG_AS_PTR:643case MIPSLoc::REG_IMM:644case MIPSLoc::FREG:645case MIPSLoc::VREG:646DiscardNativeReg(mr[mreg].nReg);647break;648649case MIPSLoc::MEM:650// Already discarded.651break;652}653mr[mreg].spillLockIRIndex = -1;654}655656void IRNativeRegCacheBase::FlushReg(IRReg mreg) {657_assert_msg_(!mr[mreg].isStatic, "Cannot flush static reg %d", mreg);658659switch (mr[mreg].loc) {660case MIPSLoc::IMM:661// IMM is always "dirty".662StoreRegValue(mreg, mr[mreg].imm);663mr[mreg].loc = MIPSLoc::MEM;664mr[mreg].nReg = -1;665mr[mreg].imm = 0;666break;667668case MIPSLoc::REG:669case MIPSLoc::REG_IMM:670case MIPSLoc::REG_AS_PTR:671case MIPSLoc::FREG:672case MIPSLoc::VREG:673// Might be in a native reg with multiple IR regs, flush together.674FlushNativeReg(mr[mreg].nReg);675break;676677case MIPSLoc::MEM:678// Already there, nothing to do.679break;680}681}682683void IRNativeRegCacheBase::FlushAll(bool gprs, bool fprs) {684// Note: make sure not to change the registers when flushing.685// Branching code may expect the native reg to retain its value.686687if (!mr[MIPS_REG_ZERO].isStatic && mr[MIPS_REG_ZERO].nReg != 
-1)688DiscardNativeReg(mr[MIPS_REG_ZERO].nReg);689690for (int i = 1; i < TOTAL_MAPPABLE_IRREGS; i++) {691IRReg mipsReg = (IRReg)i;692if (!fprs && i >= 32 && IsValidFPR(mipsReg - 32))693continue;694if (!gprs && IsValidGPR(mipsReg))695continue;696697if (mr[i].isStatic) {698IRNativeReg nreg = mr[i].nReg;699// Cannot leave any IMMs in registers, not even MIPSLoc::REG_IMM.700// Can confuse the regalloc later if this flush is mid-block701// due to an interpreter fallback that changes the register.702if (mr[i].loc == MIPSLoc::IMM) {703SetNativeRegValue(mr[i].nReg, mr[i].imm);704_assert_(IsValidGPR(mipsReg));705mr[i].loc = MIPSLoc::REG;706nr[nreg].pointerified = false;707} else if (mr[i].loc == MIPSLoc::REG_IMM) {708// The register already contains the immediate.709if (nr[nreg].pointerified) {710ERROR_LOG(Log::JIT, "RVREG_IMM but pointerified. Wrong.");711nr[nreg].pointerified = false;712}713mr[i].loc = MIPSLoc::REG;714} else if (mr[i].loc == MIPSLoc::REG_AS_PTR) {715AdjustNativeRegAsPtr(mr[i].nReg, false);716mr[i].loc = MIPSLoc::REG;717}718_assert_(mr[i].nReg != -1);719} else if (mr[i].loc != MIPSLoc::MEM) {720FlushReg(mipsReg);721}722}723724int count = 0;725const StaticAllocation *allocs = GetStaticAllocations(count);726for (int i = 0; i < count; i++) {727if (!fprs && allocs[i].loc != MIPSLoc::FREG && allocs[i].loc != MIPSLoc::VREG)728continue;729if (!gprs && allocs[i].loc != MIPSLoc::REG)730continue;731if (allocs[i].pointerified && !nr[allocs[i].nr].pointerified && jo_->enablePointerify) {732// Re-pointerify733if (mr[allocs[i].mr].loc == MIPSLoc::REG_IMM)734mr[allocs[i].mr].loc = MIPSLoc::REG;735_dbg_assert_(mr[allocs[i].mr].loc == MIPSLoc::REG);736AdjustNativeRegAsPtr(allocs[i].nr, true);737nr[allocs[i].nr].pointerified = true;738} else if (!allocs[i].pointerified) {739// If this register got pointerified on the way, mark it as not.740// This is so that after save/reload (like in an interpreter fallback),741// it won't be regarded as such, as it may no longer 
be.742nr[allocs[i].nr].pointerified = false;743}744}745// Sanity check746for (int i = 0; i < config_.totalNativeRegs; i++) {747if (nr[i].mipsReg != IRREG_INVALID && !mr[nr[i].mipsReg].isStatic) {748ERROR_LOG_REPORT(Log::JIT, "Flush fail: nr[%i].mipsReg=%i", i, nr[i].mipsReg);749}750}751}752753void IRNativeRegCacheBase::Map(const IRInst &inst) {754Mapping mapping[3];755MappingFromInst(inst, mapping);756757ApplyMapping(mapping, 3);758CleanupMapping(mapping, 3);759}760761void IRNativeRegCacheBase::MapWithExtra(const IRInst &inst, std::vector<Mapping> extra) {762extra.resize(extra.size() + 3);763MappingFromInst(inst, &extra[extra.size() - 3]);764765ApplyMapping(extra.data(), (int)extra.size());766CleanupMapping(extra.data(), (int)extra.size());767}768769IRNativeReg IRNativeRegCacheBase::MapWithTemp(const IRInst &inst, MIPSLoc type) {770Mapping mapping[3];771MappingFromInst(inst, mapping);772773ApplyMapping(mapping, 3);774// Grab a temp while things are spill locked.775IRNativeReg temp = AllocateReg(type, MIPSMap::INIT);776CleanupMapping(mapping, 3);777return temp;778}779780void IRNativeRegCacheBase::ApplyMapping(const Mapping *mapping, int count) {781for (int i = 0; i < count; ++i) {782SetSpillLockIRIndex(mapping[i].reg, irIndex_);783if (!config_.mapFPUSIMD && mapping[i].type != 'G') {784for (int j = 1; j < mapping[i].lanes; ++j)785SetSpillLockIRIndex(mapping[i].reg + j, irIndex_);786}787}788789auto isNoinit = [](MIPSMap f) {790return (f & MIPSMap::NOINIT) == MIPSMap::NOINIT;791};792793auto mapRegs = [&](int i) {794MIPSLoc type = MIPSLoc::MEM;795switch (mapping[i].type) {796case 'G': type = MIPSLoc::REG; break;797case 'F': type = MIPSLoc::FREG; break;798case 'V': type = MIPSLoc::VREG; break;799800case '_':801// Ignored intentionally.802return;803804default:805_assert_msg_(false, "Unexpected type: %c", mapping[i].type);806return;807}808809bool mapSIMD = config_.mapFPUSIMD || mapping[i].type == 'G';810MIPSMap flags = mapping[i].flags;811for (int j = 0; j < count; ++j) 
{812if (mapping[j].type == mapping[i].type && mapping[j].reg == mapping[i].reg && i != j) {813_assert_msg_(!mapSIMD || mapping[j].lanes == mapping[i].lanes, "Lane aliasing not supported yet");814815if (!isNoinit(mapping[j].flags) && isNoinit(flags)) {816flags = (flags & MIPSMap::BACKEND_MASK) | MIPSMap::DIRTY;817}818}819}820821if (mapSIMD) {822MapNativeReg(type, mapping[i].reg, mapping[i].lanes, flags);823return;824}825826for (int j = 0; j < mapping[i].lanes; ++j)827MapNativeReg(type, mapping[i].reg + j, 1, flags);828};829auto mapFilteredRegs = [&](auto pred) {830for (int i = 0; i < count; ++i) {831if (pred(mapping[i].flags))832mapRegs(i);833}834};835836// Do two passes: with backend special flags, and without.837mapFilteredRegs([](MIPSMap flags) {838return (flags & MIPSMap::BACKEND_MASK) != MIPSMap::INIT;839});840mapFilteredRegs([](MIPSMap flags) {841return (flags & MIPSMap::BACKEND_MASK) == MIPSMap::INIT;842});843}844845void IRNativeRegCacheBase::CleanupMapping(const Mapping *mapping, int count) {846for (int i = 0; i < count; ++i) {847SetSpillLockIRIndex(mapping[i].reg, -1);848if (!config_.mapFPUSIMD && mapping[i].type != 'G') {849for (int j = 1; j < mapping[i].lanes; ++j)850SetSpillLockIRIndex(mapping[i].reg + j, -1);851}852}853854// Sanity check. 
If these don't pass, we may have Vec overlap issues or etc.855for (int i = 0; i < count; ++i) {856if (mapping[i].reg != IRREG_INVALID) {857auto &mreg = mr[mapping[i].reg];858_dbg_assert_(mreg.nReg != -1);859if (mapping[i].type == 'G') {860_dbg_assert_(mreg.loc == MIPSLoc::REG || mreg.loc == MIPSLoc::REG_AS_PTR || mreg.loc == MIPSLoc::REG_IMM);861} else if (mapping[i].type == 'F') {862_dbg_assert_(mreg.loc == MIPSLoc::FREG);863} else if (mapping[i].type == 'V') {864_dbg_assert_(mreg.loc == MIPSLoc::VREG);865}866if (mapping[i].lanes != 1 && (config_.mapFPUSIMD || mapping[i].type == 'G')) {867_dbg_assert_(mreg.lane == 0);868_dbg_assert_(mr[mapping[i].reg + mapping[i].lanes - 1].lane == mapping[i].lanes - 1);869_dbg_assert_(mreg.nReg == mr[mapping[i].reg + mapping[i].lanes - 1].nReg);870} else {871_dbg_assert_(mreg.lane == -1);872}873}874}875}876877void IRNativeRegCacheBase::MappingFromInst(const IRInst &inst, Mapping mapping[3]) {878mapping[0].reg = inst.dest;879mapping[1].reg = inst.src1;880mapping[2].reg = inst.src2;881882const IRMeta *m = GetIRMeta(inst.op);883for (int i = 0; i < 3; ++i) {884switch (m->types[i]) {885case 'G':886mapping[i].type = 'G';887_assert_msg_(IsValidGPR(mapping[i].reg), "G was not valid GPR?");888break;889890case 'F':891mapping[i].reg += 32;892mapping[i].type = 'F';893_assert_msg_(IsValidFPR(mapping[i].reg - 32), "F was not valid FPR?");894break;895896case 'V':897case '2':898mapping[i].reg += 32;899mapping[i].type = config_.mapUseVRegs ? 'V' : 'F';900mapping[i].lanes = m->types[i] == 'V' ? 4 : (m->types[i] == '2' ? 
// NOTE(review): the statement fragment on the next line is the tail of an
// expression that begins above this chunk; its leading part is not visible here.
2 : 1);
			_assert_msg_(IsValidFPR(mapping[i].reg - 32), "%c was not valid FPR?", m->types[i]);
			break;

		case 'T':
			// VFPU control registers live alongside GPRs in the mapping space,
			// so retype as 'G' and shift into the IRREG_VFPU_CTRL range.
			mapping[i].type = 'G';
			_assert_msg_(mapping[i].reg < VFPU_CTRL_MAX, "T was not valid VFPU CTRL?");
			mapping[i].reg += IRREG_VFPU_CTRL_BASE;
			break;

		case '\0':
		case '_':
		case 'C':
		case 'r':
		case 'I':
		case 'v':
		case 's':
		case 'm':
			// These operand kinds don't correspond to a mappable register at all.
			mapping[i].type = '_';
			mapping[i].reg = IRREG_INVALID;
			mapping[i].lanes = 0;
			break;

		default:
			_assert_msg_(mapping[i].reg == IRREG_INVALID, "Unexpected register type %c", m->types[i]);
			break;
		}
	}

	// Operand 0's access mode depends on the instruction flags: SRC3DST both
	// reads and writes it (DIRTY), SRC3 only reads (INIT), otherwise it's
	// overwritten without needing its previous value (NOINIT).
	if (mapping[0].type != '_') {
		if ((m->flags & IRFLAG_SRC3DST) != 0)
			mapping[0].flags = MIPSMap::DIRTY;
		else if ((m->flags & IRFLAG_SRC3) != 0)
			mapping[0].flags = MIPSMap::INIT;
		else
			mapping[0].flags = MIPSMap::NOINIT;
	}
}

// Ensure (first .. first+lanes-1) is available in a native register of the given
// type, allocating a new one, transferring, or flushing an incompatible mapping
// as needed. Returns the chosen native register, or -1 on failure. The actual
// bookkeeping update is delegated to the nreg-taking overload below.
IRNativeReg IRNativeRegCacheBase::MapNativeReg(MIPSLoc type, IRReg first, int lanes, MIPSMap flags) {
	_assert_msg_(first != IRREG_INVALID, "Cannot map invalid register");
	_assert_msg_(lanes >= 1 && lanes <= 4, "Cannot map %d lanes", lanes);
	// Release-mode fallback for the asserts above.
	if (first == IRREG_INVALID || lanes < 0)
		return -1;

	// Let's see if it's already mapped or we need a new reg.
	IRNativeReg nreg = mr[first].nReg;
	if (mr[first].isStatic) {
		// Static regs are permanently assigned; they must already have an nReg.
		_assert_msg_(nreg != -1, "MapIRReg on static without an nReg?");
	} else {
		switch (mr[first].loc) {
		case MIPSLoc::REG_IMM:
		case MIPSLoc::REG_AS_PTR:
		case MIPSLoc::REG:
			if (type != MIPSLoc::REG) {
				// Changing register class (e.g. GPR -> FPR/vector): need a fresh reg.
				nreg = AllocateReg(type, flags);
			} else if (!IsNativeRegCompatible(nreg, type, flags, lanes)) {
				// If it's not compatible, we'll need to reallocate.
				if (TransferNativeReg(nreg, -1, type, first, lanes, flags)) {
					nreg = mr[first].nReg;
				} else {
					FlushNativeReg(nreg);
					nreg = AllocateReg(type, flags);
				}
			}
			break;

		case MIPSLoc::FREG:
		case MIPSLoc::VREG:
			if (type != mr[first].loc) {
				nreg = AllocateReg(type, flags);
			} else if (!IsNativeRegCompatible(nreg, type, flags, lanes)) {
				if (TransferNativeReg(nreg, -1, type, first, lanes, flags)) {
					nreg = mr[first].nReg;
				} else {
					FlushNativeReg(nreg);
					nreg = AllocateReg(type, flags);
				}
			}
			break;

		case MIPSLoc::IMM:
		case MIPSLoc::MEM:
			// Not in any native register yet.
			nreg = AllocateReg(type, flags);
			break;
		}
	}

	if (nreg != -1) {
		// This will handle already mapped and new mappings.
		MapNativeReg(type, nreg, first, lanes, flags);
	}

	return nreg;
}

// Bind (first .. first+lanes-1) to the specific native register nreg, first
// resolving any existing mappings whose lane count/position doesn't match (by
// transfer, flush, or — when provably clobbered soon — discard), then updating
// loc/lane/nReg state and the native reg's dirty/pointerified flags.
void IRNativeRegCacheBase::MapNativeReg(MIPSLoc type, IRNativeReg nreg, IRReg first, int lanes, MIPSMap flags) {
	// First, try to clean up any lane mismatches.
	// It must either be in the same nreg and lane count, or not in an nreg.
	for (int i = 0; i < lanes; ++i) {
		auto &mreg = mr[first + i];
		if (mreg.nReg != -1) {
			// How many lanes is it currently in?
			int oldlanes = 0;
			for (IRReg m = nr[mreg.nReg].mipsReg; mr[m].nReg == mreg.nReg && m < IRREG_INVALID; ++m)
				oldlanes++;

			// We may need to flush if it goes outside or we're initing.
			int oldlane = mreg.lane == -1 ? 0 : mreg.lane;
			bool mismatch = oldlanes != lanes || oldlane != i;
			if (mismatch) {
				_assert_msg_(!mreg.isStatic, "Cannot MapNativeReg a static reg mismatch");
				if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
					// If we need init, we have to flush mismatches.
					if (!TransferNativeReg(mreg.nReg, nreg, type, first, lanes, flags)) {
						// TODO: We may also be motivated to have multiple read-only "views" of an IRReg.
						// For example Vec4Scale v0..v3, v0..v3, v3
						FlushNativeReg(mreg.nReg);
					}
					// The mismatch has been "resolved" now.
					mismatch = false;
				} else if (oldlanes != 1) {
					// Even if we don't care about the current contents, we can't discard outside.
					bool extendsBefore = oldlane > i;
					bool extendsAfter = i + oldlanes - oldlane > lanes;
					if (extendsBefore || extendsAfter) {
						// Usually, this is 4->1.  Check for clobber.
						bool clobbered = false;
						if (lanes == 1) {
							// Look ahead in the IR: if every other lane of the old
							// vector mapping is clobbered before being read, we can
							// discard instead of flushing.
							IRSituation info;
							info.lookaheadCount = 16;
							info.currentIndex = irIndex_;
							info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);
							info.numInstructions = irBlock_->GetNumIRInstructions();

							// FPRs are offset by 32 in the IR register space.
							IRReg basefpr = first - oldlane - 32;
							clobbered = true;
							for (int l = 0; l < oldlanes; ++l) {
								// Ignore the one we're modifying.
								if (l == oldlane)
									continue;

								if (IRNextFPRUsage(basefpr + l, info) != IRUsage::CLOBBERED) {
									clobbered = false;
									break;
								}
							}
						}

						if (clobbered)
							DiscardNativeReg(mreg.nReg);
						else
							FlushNativeReg(mreg.nReg);

						// That took care of the mismatch, either by clobber or flush.
						mismatch = false;
					}
				}
			}

			// If it's still in a different reg, either discard or possibly transfer.
			if (mreg.nReg != -1 && (mreg.nReg != nreg || mismatch)) {
				_assert_msg_(!mreg.isStatic, "Cannot MapNativeReg a static reg to a new reg");
				if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
					// We better not be trying to map to a different nreg if it's in one now.
					// This might happen on some sort of transfer...
					if (!TransferNativeReg(mreg.nReg, nreg, type, first, lanes, flags))
						FlushNativeReg(mreg.nReg);
				} else {
					DiscardNativeReg(mreg.nReg);
				}
			}
		}

		// If somehow this is an imm and mapping to a multilane native reg (HI/LO?), we store it.
		// TODO: Could check the others are imm and be smarter, but seems an unlikely case.
		if (mreg.loc == MIPSLoc::IMM && lanes > 1) {
			if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT)
				StoreRegValue(first + i, mreg.imm);
			mreg.loc = MIPSLoc::MEM;
			if (!mreg.isStatic)
				mreg.nReg = -1;
			mreg.imm = 0;
		}
	}

	// Double check: everything should be in the same loc for multilane now.
	for (int i = 1; i < lanes; ++i) {
		_assert_(mr[first + i].loc == mr[first].loc);
	}

	bool markDirty = (flags & MIPSMap::DIRTY) == MIPSMap::DIRTY;
	if (mr[first].nReg != nreg) {
		// New occupant: reset per-native-reg state.
		nr[nreg].isDirty = markDirty;
		nr[nreg].pointerified = false;
		nr[nreg].normalized32 = false;
	}

	// Alright, now to actually map.
	if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
		if (first == MIPS_REG_ZERO) {
			// $zero is always the constant 0; materialize it directly.
			_assert_msg_(lanes == 1, "Cannot use MIPS_REG_ZERO in multilane");
			SetNativeRegValue(nreg, 0);
			mr[first].loc = MIPSLoc::REG_IMM;
			mr[first].imm = 0;
		} else {
			// Note: we checked above, everything is in the same loc if multilane.
			switch (mr[first].loc) {
			case MIPSLoc::IMM:
				_assert_msg_(lanes == 1, "Not handling multilane imm here");
				SetNativeRegValue(nreg, mr[first].imm);
				// IMM is always dirty unless static.
				if (!mr[first].isStatic)
					nr[nreg].isDirty = true;

				// If we are mapping dirty, it means we're gonna overwrite.
				// So the imm value is no longer valid.
				if ((flags & MIPSMap::DIRTY) == MIPSMap::DIRTY)
					mr[first].loc = MIPSLoc::REG;
				else
					mr[first].loc = MIPSLoc::REG_IMM;
				break;

			case MIPSLoc::REG_IMM:
				// If it's not dirty, we can keep it.
				_assert_msg_(type == MIPSLoc::REG, "Should have flushed this reg already");
				if ((flags & MIPSMap::DIRTY) == MIPSMap::DIRTY || lanes != 1)
					mr[first].loc = MIPSLoc::REG;
				for (int i = 1; i < lanes; ++i)
					mr[first + i].loc = type;
				break;

			case MIPSLoc::REG_AS_PTR:
				_assert_msg_(lanes == 1, "Should have flushed before getting here");
				_assert_msg_(type == MIPSLoc::REG, "Should have flushed this reg already");
#ifndef MASKED_PSP_MEMORY
				// Undo the pointer adjustment to recover the raw 32-bit value.
				AdjustNativeRegAsPtr(nreg, false);
#endif
				for (int i = 0; i < lanes; ++i)
					mr[first + i].loc = type;
#ifdef MASKED_PSP_MEMORY
				// With masked memory the pointer form is lossy; reload from memory.
				LoadNativeReg(nreg, first, lanes);
#endif
				break;

			case MIPSLoc::REG:
			case MIPSLoc::FREG:
			case MIPSLoc::VREG:
				// Might be flipping from FREG -> VREG or something.
				_assert_msg_(type == mr[first].loc, "Should have flushed this reg already");
				for (int i = 0; i < lanes; ++i)
					mr[first + i].loc = type;
				break;

			case MIPSLoc::MEM:
				for (int i = 0; i < lanes; ++i)
					mr[first + i].loc = type;
				LoadNativeReg(nreg, first, lanes);
				break;
			}
		}
	} else {
		// NOINIT: caller will overwrite, no need to load current contents.
		for (int i = 0; i < lanes; ++i)
			mr[first + i].loc = type;
	}

	// Record the final mapping; lane is -1 for single-lane mappings.
	for (int i = 0; i < lanes; ++i) {
		mr[first + i].nReg = nreg;
		mr[first + i].lane = lanes == 1 ? -1 : i;
	}

	nr[nreg].mipsReg = first;

	if (markDirty) {
		nr[nreg].isDirty = true;
		nr[nreg].pointerified = false;
		nr[nreg].normalized32 = false;
		// $zero must never be marked dirty (it would get written back.)
		_assert_(first != MIPS_REG_ZERO);
	}
}

// Map a GPR so its native register holds a host pointer into PSP memory
// (REG_AS_PTR), or — when pointerify is enabled — just marks the native reg
// pointerified while keeping the REG location. Returns the native register.
IRNativeReg IRNativeRegCacheBase::MapNativeRegAsPointer(IRReg gpr) {
	_dbg_assert_(IsValidGPRNoZero(gpr));

	// Already mapped.
	if (mr[gpr].loc == MIPSLoc::REG_AS_PTR) {
		return mr[gpr].nReg;
	}

	// Cannot use if somehow multilane.
	if (mr[gpr].nReg != -1 && mr[gpr].lane != -1) {
		FlushNativeReg(mr[gpr].nReg);
	}

	IRNativeReg nreg = mr[gpr].nReg;
	if (mr[gpr].loc != MIPSLoc::REG && mr[gpr].loc != MIPSLoc::REG_IMM) {
		nreg = MapNativeReg(MIPSLoc::REG, gpr, 1, MIPSMap::INIT);
	}

	if (mr[gpr].loc == MIPSLoc::REG || mr[gpr].loc == MIPSLoc::REG_IMM) {
		// If there was an imm attached, discard it.
		mr[gpr].loc = MIPSLoc::REG;
		mr[gpr].imm = 0;

#ifdef MASKED_PSP_MEMORY
		// The pointer form can't be converted back losslessly here, so write
		// back any dirty value before adjusting.
		if (nr[mr[gpr].nReg].isDirty) {
			StoreNativeReg(mr[gpr].nReg, gpr, 1);
			nr[mr[gpr].nReg].isDirty = false;
		}
#endif

		if (!jo_->enablePointerify) {
			AdjustNativeRegAsPtr(nreg, true);
			mr[gpr].loc = MIPSLoc::REG_AS_PTR;
		} else if (!nr[nreg].pointerified) {
			AdjustNativeRegAsPtr(nreg, true);
			nr[nreg].pointerified = true;
		}
	} else {
		ERROR_LOG(Log::JIT, "MapNativeRegAsPointer: MapNativeReg failed to allocate a register?");
	}
	return nreg;
}

// Base-class stub: backends that support REG_AS_PTR must override this to
// add/remove the memory base from the native register.
void IRNativeRegCacheBase::AdjustNativeRegAsPtr(IRNativeReg nreg, bool state) {
	// This isn't necessary to implement if REG_AS_PTR is unsupported entirely.
	_assert_msg_(false, "AdjustNativeRegAsPtr unimplemented");
}

// Byte offset of an IR register within the context: 4 bytes per register.
int IRNativeRegCacheBase::GetMipsRegOffset(IRReg r) {
	_dbg_assert_(IsValidGPR(r) || (r >= 32 && IsValidFPR(r - 32)));
	return r * 4;
}

// True if r is a GPR-side register this cache may map (GPRs plus a few
// special IR registers, excluding FPU/VFPU ranges and PC/nextPC.)
bool IRNativeRegCacheBase::IsValidGPR(IRReg r) const {
	// See MIPSState for these offsets.

	// Don't allow FPU regs, VFPU regs, or VFPU temps here.
	if (r >= 32 && IsValidFPR(r - 32))
		return false;
	// Don't allow nextPC, etc. since it's probably a mistake.
	if (r > IRREG_FPCOND && r != IRREG_LLBIT)
		return false;
	// Don't allow PC either.
	// NOTE(review): 241 is presumably PC's slot index in MIPSState — confirm there.
	if (r == 241)
		return false;

	return true;
}

// Same as IsValidGPR, but also excludes $zero (which can't be written.)
bool IRNativeRegCacheBase::IsValidGPRNoZero(IRReg r) const {
	return IsValidGPR(r) && r != MIPS_REG_ZERO;
}

// True if r (already offset by -32 from the IR register space) is a mappable
// FPR, VFPU register, or VFPU temp.
bool IRNativeRegCacheBase::IsValidFPR(IRReg r) const {
	// FPR parameters are off by 32 within the MIPSState object.
	if (r >= TOTAL_MAPPABLE_IRREGS - 32)
		return false;

	// See MIPSState for these offsets.
	int index = r + 32;

	// Allow FPU or VFPU regs here.
	if (index >= 32 && index < 32 + 32 + 128)
		return true;
	// Also allow VFPU temps.
	if (index >= 224 && index < 224 + 16)
		return true;

	// Nothing else is allowed for the FPU side.
	return false;
}