Path: blob/master/src/core/cpu_code_cache_private.h
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <[email protected]>
// SPDX-License-Identifier: CC-BY-NC-ND-4.0

#pragma once

#include "bus.h"
#include "common/bitfield.h"
#include "common/perf_scope.h"
#include "cpu_code_cache.h"
#include "cpu_core_private.h"
#include "cpu_types.h"

#include <array>
#include <unordered_map>

namespace CPU::CodeCache {

enum : u32
{
  LUT_TABLE_COUNT = 0x10000,
  LUT_TABLE_SIZE = 0x10000 / sizeof(u32), // 16384, one for each PC
  LUT_TABLE_SHIFT = 16,

  MAX_BLOCK_EXIT_LINKS = 2,
};

using CodeLUT = const void**;
using CodeLUTArray = std::array<CodeLUT, LUT_TABLE_COUNT>;
using BlockLinkMap = std::unordered_multimap<u32, void*>; // TODO: try ordered?

enum RegInfoFlags : u8
{
  RI_LIVE = (1 << 0),
  RI_USED = (1 << 1),
  RI_LASTUSE = (1 << 2),
};

struct InstructionInfo
{
  bool is_branch_instruction : 1;
  bool is_direct_branch_instruction : 1;
  bool is_unconditional_branch_instruction : 1;
  bool is_branch_delay_slot : 1;
  bool is_load_instruction : 1;
  bool is_store_instruction : 1;
  bool is_load_delay_slot : 1;
  bool is_last_instruction : 1;
  bool has_load_delay : 1;

  u8 reg_flags[static_cast<u8>(Reg::count)];
  // Reg write_reg[3];
  Reg read_reg[3];

  // If unset, values which are not live will not be written back to memory.
  // Tends to break stuff at the moment.
  static constexpr bool WRITE_DEAD_VALUES = true;

  /// Returns true if the register is used later in the block, and this isn't the last instruction to use it.
  /// In other words, the register is worth keeping in a host register/caching it.
  inline bool UsedTest(Reg reg) const { return (reg_flags[static_cast<u8>(reg)] & (RI_USED | RI_LASTUSE)) == RI_USED; }

  /// Returns true if the value should be computed/written back.
  /// Basically, this means it's either used before it's overwritten, or not overwritten by the end of the block.
  inline bool LiveTest(Reg reg) const
  {
    return WRITE_DEAD_VALUES || ((reg_flags[static_cast<u8>(reg)] & RI_LIVE) != 0);
  }

  /// Returns true if the register can be renamed into another.
  inline bool RenameTest(Reg reg) const { return (reg == Reg::zero || !UsedTest(reg) || !LiveTest(reg)); }

  /// Returns true if this instruction reads this register.
  inline bool ReadsReg(Reg reg) const { return (read_reg[0] == reg || read_reg[1] == reg || read_reg[2] == reg); }
};

enum class BlockState : u8
{
  Valid,
  Invalidated,
  NeedsRecompile,
  FallbackToInterpreter
};

enum class BlockFlags : u8
{
  None = 0,
  ContainsLoadStoreInstructions = (1 << 0),
  SpansPages = (1 << 1),
  BranchDelaySpansPages = (1 << 2),
  IsUsingICache = (1 << 3),
  NeedsDynamicFetchTicks = (1 << 4),
};
IMPLEMENT_ENUM_CLASS_BITWISE_OPERATORS(BlockFlags);

enum class PageProtectionMode : u8
{
  WriteProtected,
  ManualCheck,
  Unprotected,
};

struct BlockMetadata
{
  TickCount uncached_fetch_ticks;
  u32 icache_line_count;
  BlockFlags flags;
};

struct alignas(16) Block
{
  u32 pc;
  u32 size; // in guest instructions
  const void* host_code;

  // links to previous/next block within page
  Block* next_block_in_page;

  BlockLinkMap::iterator exit_links[MAX_BLOCK_EXIT_LINKS];
  u8 num_exit_links;

  // TODO: Move up so it's part of the same cache line
  BlockState state;
  BlockFlags flags;
  PageProtectionMode protection;

  TickCount uncached_fetch_ticks;
  u32 icache_line_count;

  u32 host_code_size;
  u32 compile_frame;
  u8 compile_count;

  // followed by Instruction * size, InstructionRegInfo * size
  ALWAYS_INLINE const Instruction* Instructions() const { return reinterpret_cast<const Instruction*>(this + 1); }
  ALWAYS_INLINE Instruction* Instructions() { return reinterpret_cast<Instruction*>(this + 1); }

  ALWAYS_INLINE const InstructionInfo* InstructionsInfo() const
  {
    return reinterpret_cast<const InstructionInfo*>(Instructions() + size);
  }
  ALWAYS_INLINE InstructionInfo* InstructionsInfo()
  {
    return reinterpret_cast<InstructionInfo*>(Instructions() + size);
  }

  // returns true if the block has a given flag
  ALWAYS_INLINE bool HasFlag(BlockFlags flag) const { return ((flags & flag) != BlockFlags::None); }

  // returns the page index for the start of the block
  ALWAYS_INLINE u32 StartPageIndex() const { return Bus::GetRAMCodePageIndex(pc); }

  // returns the page index for the last instruction in the block (inclusive)
  ALWAYS_INLINE u32 EndPageIndex() const { return Bus::GetRAMCodePageIndex(pc + ((size - 1) * sizeof(Instruction))); }

  // returns true if the block spans multiple pages
  ALWAYS_INLINE bool SpansPages() const { return StartPageIndex() != EndPageIndex(); }
};

using BlockLUTArray = std::array<Block**, LUT_TABLE_COUNT>;

struct LoadstoreBackpatchInfo
{
  union
  {
    struct
    {
      u32 gpr_bitmask;
      u16 cycles;
      u16 address_register : 5;
      u16 data_register : 5;
      u16 size : 2;
      u16 is_signed : 1;
      u16 is_load : 1;
    };

    const void* thunk_address; // only needed for oldrec
  };

  u32 guest_pc;
  u32 guest_block;
  u8 code_size;

  MemoryAccessSize AccessSize() const { return static_cast<MemoryAccessSize>(size); }
  u32 AccessSizeInBytes() const { return 1u << size; }
};
#ifdef CPU_ARCH_ARM32
static_assert(sizeof(LoadstoreBackpatchInfo) == 20);
#else
static_assert(sizeof(LoadstoreBackpatchInfo) == 24);
#endif

static inline bool AddressInRAM(VirtualMemoryAddress pc)
{
  return VirtualAddressToPhysical(pc) < Bus::g_ram_size;
}

struct PageProtectionInfo
{
  Block* first_block_in_page;
  Block* last_block_in_page;

  PageProtectionMode mode;
  u16 invalidate_count;
  u32 invalidate_frame;
};
static_assert(sizeof(PageProtectionInfo) == (sizeof(Block*) * 2 + 8));

template<PGXPMode pgxp_mode>
void InterpretCachedBlock(const Block* block);

template<PGXPMode pgxp_mode>
void InterpretUncachedBlock();

void LogCurrentState();

#if defined(_DEBUG) || defined(_DEVEL) || false
// Enable disassembly of host assembly code.
#define ENABLE_HOST_DISASSEMBLY 1
#endif

/// Access to normal code allocator.
u8* GetFreeCodePointer();
u32 GetFreeCodeSpace();
void CommitCode(u32 length);

/// Access to far code allocator.
u8* GetFreeFarCodePointer();
u32 GetFreeFarCodeSpace();
void CommitFarCode(u32 length);

/// Adjusts the free code pointer to the specified alignment, padding with bytes.
/// Assumes alignment is a power-of-two.
void AlignCode(u32 alignment);

const void* GetInterpretUncachedBlockFunction();

void CompileOrRevalidateBlock(u32 start_pc);
void DiscardAndRecompileBlock(u32 start_pc);
const void* CreateBlockLink(Block* from_block, void* code, u32 newpc);
const void* CreateSelfBlockLink(Block* block, void* code, const void* block_start);

void AddLoadStoreInfo(void* code_address, u32 code_size, u32 guest_pc, const void* thunk_address);
void AddLoadStoreInfo(void* code_address, u32 code_size, u32 guest_pc, u32 guest_block, TickCount cycles,
                      u32 gpr_bitmask, u8 address_register, u8 data_register, MemoryAccessSize size, bool is_signed,
                      bool is_load);
bool HasPreviouslyFaultedOnPC(u32 guest_pc);

u32 EmitASMFunctions(void* code, u32 code_size);
u32 EmitJump(void* code, const void* dst, bool flush_icache);
void EmitAlignmentPadding(void* dst, size_t size);

void DisassembleAndLogHostCode(const void* start, u32 size);
u32 GetHostInstructionCount(const void* start, u32 size);

extern CodeLUTArray g_code_lut;

extern NORETURN_FUNCTION_POINTER void (*g_enter_recompiler)();
extern const void* g_compile_or_revalidate_block;
extern const void* g_run_events_and_dispatch;
extern const void* g_dispatcher;
extern const void* g_block_dispatcher;
extern const void* g_interpret_block;
extern const void* g_discard_and_recompile_block;

#ifdef ENABLE_RECOMPILER_PROFILING

extern PerfScope MIPSPerfScope;

#endif // ENABLE_RECOMPILER_PROFILING

} // namespace CPU::CodeCache
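
For orientation, here is a minimal sketch of the two-level lookup implied by the LUT constants at the top of the header: the upper 16 bits of the PC (LUT_TABLE_SHIFT) select one of LUT_TABLE_COUNT tables, and each table holds LUT_TABLE_SIZE entries, one per 4-byte instruction slot in a 64 KB window. The function name LookupHostCode and the direct indexing are assumptions for illustration only; the actual dispatcher lookup lives in the implementation, not in this header, and may be organized differently.

static const void* LookupHostCode(const CodeLUTArray& lut, u32 pc)
{
  // Upper 16 bits of the PC select one of the LUT_TABLE_COUNT per-window tables.
  const u32 table = pc >> LUT_TABLE_SHIFT;

  // The low 16 bits, divided by the 4-byte instruction size, select one of the
  // LUT_TABLE_SIZE entries ("one for each PC") within that table.
  const u32 index = (pc & 0xFFFFu) >> 2;

  return lut[table][index];
}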
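
Block is a variable-length structure: Instructions() and InstructionsInfo() return pointers just past the Block object itself (this + 1), so an allocation must reserve room for size Instruction entries followed by size InstructionInfo entries. The sketch below makes that sizing concrete; the helper name AllocateBlock and the use of aligned operator new (from <new>) are illustrative assumptions, as the real allocator is in the code cache implementation rather than this header.

static Block* AllocateBlock(u32 pc, u32 instruction_count)
{
  // Room for the Block header plus the two trailing arrays described above.
  const size_t alloc_size = sizeof(Block) + (instruction_count * sizeof(Instruction)) +
                            (instruction_count * sizeof(InstructionInfo));

  // Block is alignas(16), so request a matching alignment from operator new.
  Block* block = static_cast<Block*>(::operator new(alloc_size, std::align_val_t{alignof(Block)}));
  block->pc = pc;
  block->size = instruction_count;
  block->num_exit_links = 0;
  return block;
}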