CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!
Path: blob/master/Core/MIPS/IR/IRJit.cpp
Views: 1401
// Copyright (c) 2012- PPSSPP Project.12// This program is free software: you can redistribute it and/or modify3// it under the terms of the GNU General Public License as published by4// the Free Software Foundation, version 2.0 or later versions.56// This program is distributed in the hope that it will be useful,7// but WITHOUT ANY WARRANTY; without even the implied warranty of8// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the9// GNU General Public License 2.0 for more details.1011// A copy of the GPL 2.0 should have been included with the program.12// If not, see http://www.gnu.org/licenses/1314// Official git repository and contact information can be found at15// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.1617#include "ppsspp_config.h"18#include <set>19#include <algorithm>2021#include "ext/xxhash.h"22#include "Common/Profiler/Profiler.h"2324#include "Common/Log.h"25#include "Common/Serialize/Serializer.h"26#include "Common/StringUtils.h"2728#include "Core/Config.h"29#include "Core/Core.h"30#include "Core/CoreTiming.h"31#include "Core/HLE/sceKernelMemory.h"32#include "Core/MemMap.h"33#include "Core/MIPS/MIPS.h"34#include "Core/MIPS/MIPSCodeUtils.h"35#include "Core/MIPS/MIPSInt.h"36#include "Core/MIPS/MIPSTables.h"37#include "Core/MIPS/IR/IRRegCache.h"38#include "Core/MIPS/IR/IRInterpreter.h"39#include "Core/MIPS/IR/IRJit.h"40#include "Core/MIPS/IR/IRNativeCommon.h"41#include "Core/MIPS/JitCommon/JitCommon.h"42#include "Core/Reporting.h"43#include "Common/TimeUtil.h"44#include "Core/MIPS/MIPSTracer.h"454647namespace MIPSComp {4849IRJit::IRJit(MIPSState *mipsState, bool actualJit) : frontend_(mipsState->HasDefaultPrefix()), mips_(mipsState), blocks_(actualJit) {50// u32 size = 128 * 1024;51InitIR();5253compileToNative_ = actualJit;5455// If this IRJit instance will be used to drive a "JIT using IR", don't optimize for interpretation.56jo.optimizeForInterpreter = !actualJit;5758IROptions opts{};59opts.disableFlags = 
g_Config.uJitDisableFlags;60#if PPSSPP_ARCH(RISCV64)61// Assume RISC-V always has very slow unaligned memory accesses.62opts.unalignedLoadStore = false;63opts.unalignedLoadStoreVec4 = true;64opts.preferVec4 = cpu_info.RiscV_V;65#elif PPSSPP_ARCH(ARM) || PPSSPP_ARCH(ARM64)66opts.unalignedLoadStore = (opts.disableFlags & (uint32_t)JitDisable::LSU_UNALIGNED) == 0;67opts.unalignedLoadStoreVec4 = true;68opts.preferVec4 = true;69#else70opts.unalignedLoadStore = (opts.disableFlags & (uint32_t)JitDisable::LSU_UNALIGNED) == 0;71// TODO: Could allow on x86 pretty easily...72opts.unalignedLoadStoreVec4 = false;73opts.preferVec4 = true;74#endif75opts.optimizeForInterpreter = jo.optimizeForInterpreter;76frontend_.SetOptions(opts);77}7879IRJit::~IRJit() {80}8182void IRJit::DoState(PointerWrap &p) {83frontend_.DoState(p);84}8586void IRJit::UpdateFCR31() {87}8889void IRJit::ClearCache() {90INFO_LOG(Log::JIT, "IRJit: Clearing the block cache!");91blocks_.Clear();92}9394void IRJit::InvalidateCacheAt(u32 em_address, int length) {95std::vector<int> numbers = blocks_.FindInvalidatedBlockNumbers(em_address, length);96if (numbers.empty()) {97return;98}99100DEBUG_LOG(Log::JIT, "Invalidating IR block cache at %08x (%d bytes): %d blocks", em_address, length, (int)numbers.size());101102for (int block_num : numbers) {103auto block = blocks_.GetBlock(block_num);104// TODO: We are invalidating a lot of blocks that are already invalid (yu gi oh).105// INFO_LOG(Log::JIT, "Block at %08x invalidated: valid: %d", block->GetOriginalStart(), block->IsValid());106// If we're a native JIT (IR->JIT, not just IR interpreter), we write native offsets into the blocks.107int cookie = compileToNative_ ? 
block->GetNativeOffset() : block->GetIRArenaOffset();108blocks_.RemoveBlockFromPageLookup(block_num);109block->Destroy(cookie);110}111}112113void IRJit::Compile(u32 em_address) {114_dbg_assert_(compilerEnabled_);115116PROFILE_THIS_SCOPE("jitc");117118if (g_Config.bPreloadFunctions) {119// Look to see if we've preloaded this block.120int block_num = blocks_.FindPreloadBlock(em_address);121if (block_num != -1) {122IRBlock *block = blocks_.GetBlock(block_num);123// Okay, let's link and finalize the block now.124int cookie = compileToNative_ ? block->GetNativeOffset() : block->GetIRArenaOffset();125block->Finalize(cookie);126if (block->IsValid()) {127// Success, we're done.128FinalizeNativeBlock(&blocks_, block_num);129return;130}131}132}133134std::vector<IRInst> instructions;135u32 mipsBytes;136if (!CompileBlock(em_address, instructions, mipsBytes, false)) {137// Ran out of block numbers - need to reset.138ERROR_LOG(Log::JIT, "Ran out of block numbers, clearing cache");139ClearCache();140CompileBlock(em_address, instructions, mipsBytes, false);141}142143if (frontend_.CheckRounding(em_address)) {144// Our assumptions are all wrong so it's clean-slate time.145ClearCache();146CompileBlock(em_address, instructions, mipsBytes, false);147}148}149150// WARNING! This can be called from IRInterpret / the JIT, through the function preload stuff!151bool IRJit::CompileBlock(u32 em_address, std::vector<IRInst> &instructions, u32 &mipsBytes, bool preload) {152_dbg_assert_(compilerEnabled_);153154frontend_.DoJit(em_address, instructions, mipsBytes, preload);155if (instructions.empty()) {156_dbg_assert_(preload);157// We return true when preloading so it doesn't abort.158return preload;159}160161int block_num = blocks_.AllocateBlock(em_address, mipsBytes, instructions);162if ((block_num & ~MIPS_EMUHACK_VALUE_MASK) != 0) {163WARN_LOG(Log::JIT, "Failed to allocate block for %08x (%d instructions)", em_address, (int)instructions.size());164// Out of block numbers. 
Caller will handle.165return false;166}167168IRBlock *b = blocks_.GetBlock(block_num);169if (preload || mipsTracer.tracing_enabled) {170// Hash, then only update page stats, don't link yet.171// TODO: Should we always hash? Then we can reuse blocks.172b->UpdateHash();173}174175if (!CompileNativeBlock(&blocks_, block_num, preload))176return false;177178if (mipsTracer.tracing_enabled) {179mipsTracer.prepare_block(b, blocks_);180}181182// Updates stats, also patches the first MIPS instruction into an emuhack if 'preload == false'183blocks_.FinalizeBlock(block_num, preload);184if (!preload)185FinalizeNativeBlock(&blocks_, block_num);186return true;187}188189void IRJit::CompileFunction(u32 start_address, u32 length) {190_dbg_assert_(compilerEnabled_);191192PROFILE_THIS_SCOPE("jitc");193194// Note: we don't actually write emuhacks yet, so we can validate hashes.195// This way, if the game changes the code afterward, we'll catch even without icache invalidation.196197// We may go up and down from branches, so track all block starts done here.198std::set<u32> doneAddresses;199std::vector<u32> pendingAddresses;200pendingAddresses.reserve(16);201pendingAddresses.push_back(start_address);202while (!pendingAddresses.empty()) {203u32 em_address = pendingAddresses.back();204pendingAddresses.pop_back();205206// To be safe, also check if a real block is there. 
This can be a runtime module load.207u32 inst = Memory::ReadUnchecked_U32(em_address);208if (MIPS_IS_RUNBLOCK(inst) || doneAddresses.find(em_address) != doneAddresses.end()) {209// Already compiled this address.210continue;211}212213std::vector<IRInst> instructions;214u32 mipsBytes;215if (!CompileBlock(em_address, instructions, mipsBytes, true)) {216// Ran out of block numbers - let's hope there's no more code it needs to run.217// Will flush when actually compiling.218ERROR_LOG(Log::JIT, "Ran out of block numbers while compiling function");219return;220}221222doneAddresses.insert(em_address);223224for (const IRInst &inst : instructions) {225u32 exit = 0;226227switch (inst.op) {228case IROp::ExitToConst:229case IROp::ExitToConstIfEq:230case IROp::ExitToConstIfNeq:231case IROp::ExitToConstIfGtZ:232case IROp::ExitToConstIfGeZ:233case IROp::ExitToConstIfLtZ:234case IROp::ExitToConstIfLeZ:235case IROp::ExitToConstIfFpTrue:236case IROp::ExitToConstIfFpFalse:237exit = inst.constant;238break;239240case IROp::ExitToPC:241case IROp::Break:242// Don't add any, we'll do block end anyway (for jal, etc.)243exit = 0;244break;245246default:247exit = 0;248break;249}250251// Only follow jumps internal to the function.252if (exit != 0 && exit >= start_address && exit < start_address + length) {253// Even if it's a duplicate, we check at loop start.254pendingAddresses.push_back(exit);255}256}257258// Also include after the block for jal returns.259if (em_address + mipsBytes < start_address + length) {260pendingAddresses.push_back(em_address + mipsBytes);261}262}263}264265void IRJit::RunLoopUntil(u64 globalticks) {266PROFILE_THIS_SCOPE("jit");267268// ApplyRoundingMode(true);269// IR Dispatcher270271while (true) {272// RestoreRoundingMode(true);273CoreTiming::Advance();274// ApplyRoundingMode(true);275if (coreState != 0) {276break;277}278279MIPSState *mips = mips_;280#ifdef _DEBUG281compilerEnabled_ = false;282#endif283while (mips->downcount >= 0) {284u32 inst = 
Memory::ReadUnchecked_U32(mips->pc);285u32 opcode = inst & 0xFF000000;286if (opcode == MIPS_EMUHACK_OPCODE) {287u32 offset = inst & 0x00FFFFFF; // Alternatively, inst - opcode288const IRInst *instPtr = blocks_.GetArenaPtr() + offset;289// First op is always, except when using breakpoints, downcount, to save one dispatch inside IRInterpret.290// This branch is very cpu-branch-predictor-friendly so this still beats the dispatch.291if (instPtr->op == IROp::Downcount) {292mips->downcount -= instPtr->constant;293instPtr++;294}295#ifdef IR_PROFILING296IRBlock *block = blocks_.GetBlock(blocks_.GetBlockNumFromOffset(offset));297Instant start = Instant::Now();298mips->pc = IRInterpret(mips, instPtr);299int64_t elapsedNanos = start.ElapsedNanos();300block->profileStats_.executions += 1;301block->profileStats_.totalNanos += elapsedNanos;302#else303mips->pc = IRInterpret(mips, instPtr);304#endif305// Note: this will "jump to zero" on a badly constructed block missing exits.306if (!Memory::IsValid4AlignedAddress(mips->pc)) {307int blockNum = blocks_.GetBlockNumFromIRArenaOffset(offset);308IRBlock *block = blocks_.GetBlockUnchecked(blockNum);309Core_ExecException(mips->pc, block->GetOriginalStart(), ExecExceptionType::JUMP);310break;311}312} else {313// RestoreRoundingMode(true);314#ifdef _DEBUG315compilerEnabled_ = true;316#endif317Compile(mips->pc);318#ifdef _DEBUG319compilerEnabled_ = false;320#endif321// ApplyRoundingMode(true);322}323}324#ifdef _DEBUG325compilerEnabled_ = true;326#endif327}328329// RestoreRoundingMode(true);330}331332bool IRJit::DescribeCodePtr(const u8 *ptr, std::string &name) {333// Used in native disassembly viewer.334return false;335}336337void IRJit::LinkBlock(u8 *exitPoint, const u8 *checkedEntry) {338Crash();339}340341void IRJit::UnlinkBlock(u8 *checkedEntry, u32 originalAddress) {342Crash();343}344345void IRBlockCache::Clear() {346for (int i = 0; i < (int)blocks_.size(); ++i) {347int cookie = compileToNative_ ? 
blocks_[i].GetNativeOffset() : blocks_[i].GetIRArenaOffset();348blocks_[i].Destroy(cookie);349}350blocks_.clear();351byPage_.clear();352arena_.clear();353arena_.shrink_to_fit();354}355356IRBlockCache::IRBlockCache(bool compileToNative) : compileToNative_(compileToNative) {}357358int IRBlockCache::AllocateBlock(int emAddr, u32 origSize, const std::vector<IRInst> &insts) {359// We have 24 bits to represent offsets with.360const u32 MAX_ARENA_SIZE = 0x1000000 - 1;361int offset = (int)arena_.size();362if (offset >= MAX_ARENA_SIZE) {363WARN_LOG(Log::JIT, "Filled JIT arena, restarting");364return -1;365}366// TODO: Use memcpy.367for (int i = 0; i < insts.size(); i++) {368arena_.push_back(insts[i]);369}370int newBlockIndex = (int)blocks_.size();371blocks_.push_back(IRBlock(emAddr, origSize, offset, (u32)insts.size()));372return newBlockIndex;373}374375int IRBlockCache::GetBlockNumFromIRArenaOffset(int offset) const {376// Block offsets are always in rising order (we don't go back and replace them when invalidated). 
So we can binary search.377int low = 0;378int high = (int)blocks_.size() - 1;379int found = -1;380while (low <= high) {381int mid = low + (high - low) / 2;382const int blockOffset = blocks_[mid].GetIRArenaOffset();383if (blockOffset == offset) {384found = mid;385break;386}387if (blockOffset < offset) {388low = mid + 1;389} else {390high = mid - 1;391}392}393394#ifndef _DEBUG395// Then, in debug builds, cross check the result.396return found;397#else398// TODO: Optimize if we need to call this often.399for (int i = 0; i < (int)blocks_.size(); i++) {400if (blocks_[i].GetIRArenaOffset() == offset) {401_dbg_assert_(i == found);402return i;403}404}405#endif406_dbg_assert_(found == -1);407return -1;408}409410std::vector<int> IRBlockCache::FindInvalidatedBlockNumbers(u32 address, u32 lengthInBytes) {411u32 startPage = AddressToPage(address);412u32 endPage = AddressToPage(address + lengthInBytes);413414std::vector<int> found;415for (u32 page = startPage; page <= endPage; ++page) {416const auto iter = byPage_.find(page);417if (iter == byPage_.end())418continue;419420const std::vector<int> &blocksInPage = iter->second;421for (int i : blocksInPage) {422if (blocks_[i].OverlapsRange(address, lengthInBytes)) {423// We now try to remove these during invalidation.424found.push_back(i);425}426}427}428429return found;430}431432void IRBlockCache::FinalizeBlock(int blockIndex, bool preload) {433// TODO: What's different about preload blocks?434IRBlock &block = blocks_[blockIndex];435if (!preload) {436int cookie = compileToNative_ ? 
block.GetNativeOffset() : block.GetIRArenaOffset();437block.Finalize(cookie);438}439440u32 startAddr, size;441block.GetRange(&startAddr, &size);442443u32 startPage = AddressToPage(startAddr);444u32 endPage = AddressToPage(startAddr + size);445446for (u32 page = startPage; page <= endPage; ++page) {447byPage_[page].push_back(blockIndex);448}449}450451// Call after Destroy-ing it.452void IRBlockCache::RemoveBlockFromPageLookup(int blockIndex) {453// We need to remove the block from the byPage lookup.454IRBlock &block = blocks_[blockIndex];455456u32 startAddr, size;457block.GetRange(&startAddr, &size);458459u32 startPage = AddressToPage(startAddr);460u32 endPage = AddressToPage(startAddr + size);461462for (u32 page = startPage; page <= endPage; ++page) {463auto iter = std::find(byPage_[page].begin(), byPage_[page].end(), blockIndex);464if (iter != byPage_[page].end()) {465byPage_[page].erase(iter);466} else if (block.IsValid()) {467// If it was previously invalidated, we don't care, hence the above check.468WARN_LOG(Log::JIT, "RemoveBlock: Block at %08x was not found where expected in byPage table.", startAddr);469}470}471472// Additionally, we'd like to zap the block in the IR arena.473// However, this breaks if calling sceKernelIcacheClearAll(), since as soon as we return, we'll be executing garbage.474/*475IRInst bad{ IROp::Bad };476for (int off = block.GetIRArenaOffset(); off < (int)(block.GetIRArenaOffset() + block.GetNumIRInstructions()); off++) {477arena_[off] = bad;478}479*/480}481482u32 IRBlockCache::AddressToPage(u32 addr) const {483// Use relatively small pages since basic blocks are typically small.484return (addr & 0x3FFFFFFF) >> 10;485}486487int IRBlockCache::FindPreloadBlock(u32 em_address) {488u32 page = AddressToPage(em_address);489auto iter = byPage_.find(page);490if (iter == byPage_.end())491return -1;492493const std::vector<int> &blocksInPage = iter->second;494for (int i : blocksInPage) {495if (blocks_[i].GetOriginalStart() == em_address) {496if 
(blocks_[i].HashMatches()) {497return i;498}499}500}501502return -1;503}504505int IRBlockCache::FindByCookie(int cookie) {506if (blocks_.empty())507return -1;508509// TODO: Maybe a flag to determine native offset mode?510if (!compileToNative_) {511return GetBlockNumFromIRArenaOffset(cookie);512}513514// TODO: This could also use a binary search.515for (int i = 0; i < GetNumBlocks(); ++i) {516int offset = blocks_[i].GetNativeOffset();517if (offset == cookie)518return i;519}520return -1;521}522523std::vector<u32> IRBlockCache::SaveAndClearEmuHackOps() {524std::vector<u32> result;525result.resize(blocks_.size());526527for (int number = 0; number < (int)blocks_.size(); ++number) {528IRBlock &b = blocks_[number];529int cookie = compileToNative_ ? b.GetNativeOffset() : b.GetIRArenaOffset();530if (b.IsValid() && b.RestoreOriginalFirstOp(cookie)) {531result[number] = number;532} else {533result[number] = 0;534}535}536537return result;538}539540void IRBlockCache::RestoreSavedEmuHackOps(const std::vector<u32> &saved) {541if ((int)blocks_.size() != (int)saved.size()) {542ERROR_LOG(Log::JIT, "RestoreSavedEmuHackOps: Wrong saved block size.");543return;544}545546for (int number = 0; number < (int)blocks_.size(); ++number) {547IRBlock &b = blocks_[number];548// Only if we restored it, write it back.549if (b.IsValid() && saved[number] != 0 && b.HasOriginalFirstOp()) {550int cookie = compileToNative_ ? 
b.GetNativeOffset() : b.GetIRArenaOffset();551b.Finalize(cookie);552}553}554}555556JitBlockDebugInfo IRBlockCache::GetBlockDebugInfo(int blockNum) const {557const IRBlock &ir = blocks_[blockNum];558JitBlockDebugInfo debugInfo{};559uint32_t start, size;560ir.GetRange(&start, &size);561debugInfo.originalAddress = start; // TODO562563debugInfo.origDisasm.reserve(((start + size) - start) / 4);564for (u32 addr = start; addr < start + size; addr += 4) {565char temp[256];566MIPSDisAsm(Memory::Read_Instruction(addr), addr, temp, sizeof(temp), true);567std::string mipsDis = temp;568debugInfo.origDisasm.push_back(mipsDis);569}570571debugInfo.irDisasm.reserve(ir.GetNumIRInstructions());572const IRInst *instructions = GetBlockInstructionPtr(ir);573for (int i = 0; i < ir.GetNumIRInstructions(); i++) {574IRInst inst = instructions[i];575char buffer[256];576DisassembleIR(buffer, sizeof(buffer), inst);577debugInfo.irDisasm.push_back(buffer);578}579return debugInfo;580}581582void IRBlockCache::ComputeStats(BlockCacheStats &bcStats) const {583double totalBloat = 0.0;584double maxBloat = 0.0;585double minBloat = 1000000000.0;586for (const auto &b : blocks_) {587double codeSize = (double)b.GetNumIRInstructions() * 4; // We count bloat in instructions, not bytes. 
sizeof(IRInst);588if (codeSize == 0)589continue;590u32 origAddr, mipsBytes;591b.GetRange(&origAddr, &mipsBytes);592double origSize = (double)mipsBytes;593double bloat = codeSize / origSize;594if (bloat < minBloat) {595minBloat = bloat;596bcStats.minBloatBlock = origAddr;597}598if (bloat > maxBloat) {599maxBloat = bloat;600bcStats.maxBloatBlock = origAddr;601}602totalBloat += bloat;603}604bcStats.numBlocks = (int)blocks_.size();605bcStats.minBloat = minBloat;606bcStats.maxBloat = maxBloat;607bcStats.avgBloat = totalBloat / (double)blocks_.size();608}609610int IRBlockCache::GetBlockNumberFromStartAddress(u32 em_address, bool realBlocksOnly) const {611u32 page = AddressToPage(em_address);612613const auto iter = byPage_.find(page);614if (iter == byPage_.end())615return -1;616617const std::vector<int> &blocksInPage = iter->second;618int best = -1;619for (int i : blocksInPage) {620if (blocks_[i].GetOriginalStart() == em_address) {621best = i;622if (blocks_[i].IsValid()) {623return i;624}625}626}627return best;628}629630bool IRBlock::HasOriginalFirstOp() const {631return Memory::ReadUnchecked_U32(origAddr_) == origFirstOpcode_.encoding;632}633634bool IRBlock::RestoreOriginalFirstOp(int cookie) {635const u32 emuhack = MIPS_EMUHACK_OPCODE | cookie;636if (Memory::ReadUnchecked_U32(origAddr_) == emuhack) {637Memory::Write_Opcode_JIT(origAddr_, origFirstOpcode_);638return true;639}640return false;641}642643void IRBlock::Finalize(int cookie) {644// Check it wasn't invalidated, in case this is after preload.645// TODO: Allow reusing blocks when the code matches hash_ again, instead.646if (origAddr_) {647origFirstOpcode_ = Memory::Read_Opcode_JIT(origAddr_);648MIPSOpcode opcode = MIPSOpcode(MIPS_EMUHACK_OPCODE | cookie);649Memory::Write_Opcode_JIT(origAddr_, opcode);650} else {651WARN_LOG(Log::JIT, "Finalizing invalid block (cookie: %d)", cookie);652}653}654655void IRBlock::Destroy(int cookie) {656if (origAddr_) {657MIPSOpcode opcode = MIPSOpcode(MIPS_EMUHACK_OPCODE | 
cookie);658u32 memOp = Memory::ReadUnchecked_U32(origAddr_);659if (memOp == opcode.encoding) {660Memory::Write_Opcode_JIT(origAddr_, origFirstOpcode_);661} else {662// NOTE: This is not an error. Just interesting to log.663DEBUG_LOG(Log::JIT, "IRBlock::Destroy: Note: Block at %08x was overwritten - checked for %08x, got %08x when restoring the MIPS op to %08x", origAddr_, opcode.encoding, memOp, origFirstOpcode_.encoding);664}665// TODO: Also wipe the block in the IR opcode arena.666// Let's mark this invalid so we don't try to clear it again.667origAddr_ = 0;668}669}670671u64 IRBlock::CalculateHash() const {672if (origAddr_) {673// This is unfortunate. In case there are emuhacks, we have to make a copy.674// If we could hash while reading we could avoid this.675std::vector<u32> buffer;676buffer.resize(origSize_ / 4);677size_t pos = 0;678for (u32 off = 0; off < origSize_; off += 4) {679// Let's actually hash the replacement, if any.680MIPSOpcode instr = Memory::ReadUnchecked_Instruction(origAddr_ + off, false);681buffer[pos++] = instr.encoding;682}683return XXH3_64bits(&buffer[0], origSize_);684}685return 0;686}687688bool IRBlock::OverlapsRange(u32 addr, u32 size) const {689addr &= 0x3FFFFFFF;690u32 origAddr = origAddr_ & 0x3FFFFFFF;691return addr + size > origAddr && addr < origAddr + origSize_;692}693694MIPSOpcode IRJit::GetOriginalOp(MIPSOpcode op) {695IRBlock *b = blocks_.GetBlock(blocks_.FindByCookie(op.encoding & 0xFFFFFF));696if (b) {697return b->GetOriginalFirstOp();698}699return op;700}701702} // namespace MIPSComp703704705