GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/IR/IRJit.cpp
// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
#include <set>
#include <algorithm>

#include "ext/xxhash.h"
#include "Common/Profiler/Profiler.h"

#include "Common/Log.h"
#include "Common/Serialize/Serializer.h"
#include "Common/StringUtils.h"

#include "Core/Config.h"
#include "Core/Core.h"
#include "Core/CoreTiming.h"
#include "Core/HLE/sceKernelMemory.h"
#include "Core/MemMap.h"
#include "Core/MIPS/MIPS.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/MIPSInt.h"
#include "Core/MIPS/MIPSTables.h"
#include "Core/MIPS/IR/IRRegCache.h"
#include "Core/MIPS/IR/IRInterpreter.h"
#include "Core/MIPS/IR/IRJit.h"
#include "Core/MIPS/IR/IRNativeCommon.h"
#include "Core/MIPS/JitCommon/JitCommon.h"
#include "Core/Reporting.h"
#include "Common/TimeUtil.h"
#include "Core/MIPS/MIPSTracer.h"


namespace MIPSComp {

IRJit::IRJit(MIPSState *mipsState, bool actualJit) : frontend_(mipsState->HasDefaultPrefix()), mips_(mipsState), blocks_(actualJit) {
	// u32 size = 128 * 1024;
	InitIR();
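	// compileToNative_ == true means this IRJit only produces IR as input for a native backend;
	// when false, the IR is executed directly by IRInterpret (see RunLoopUntil below).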
	compileToNative_ = actualJit;

	// If this IRJit instance will be used to drive a "JIT using IR", don't optimize for interpretation.
	jo.optimizeForInterpreter = !actualJit;

	IROptions opts{};
	opts.disableFlags = g_Config.uJitDisableFlags;
#if PPSSPP_ARCH(RISCV64)
	// Assume RISC-V always has very slow unaligned memory accesses.
	opts.unalignedLoadStore = false;
	opts.unalignedLoadStoreVec4 = true;
	opts.preferVec4 = cpu_info.RiscV_V;
#elif PPSSPP_ARCH(ARM) || PPSSPP_ARCH(ARM64)
	opts.unalignedLoadStore = (opts.disableFlags & (uint32_t)JitDisable::LSU_UNALIGNED) == 0;
	opts.unalignedLoadStoreVec4 = true;
	opts.preferVec4 = true;
#else
	opts.unalignedLoadStore = (opts.disableFlags & (uint32_t)JitDisable::LSU_UNALIGNED) == 0;
	// TODO: Could allow on x86 pretty easily...
	opts.unalignedLoadStoreVec4 = false;
	opts.preferVec4 = true;
#endif
	opts.optimizeForInterpreter = jo.optimizeForInterpreter;
	frontend_.SetOptions(opts);
}

IRJit::~IRJit() {
}

void IRJit::DoState(PointerWrap &p) {
	frontend_.DoState(p);
}

void IRJit::UpdateFCR31() {
}

void IRJit::ClearCache() {
	INFO_LOG(Log::JIT, "IRJit: Clearing the block cache!");
	blocks_.Clear();
}

void IRJit::InvalidateCacheAt(u32 em_address, int length) {
	std::vector<int> numbers = blocks_.FindInvalidatedBlockNumbers(em_address, length);
	if (numbers.empty()) {
		return;
	}

	DEBUG_LOG(Log::JIT, "Invalidating IR block cache at %08x (%d bytes): %d blocks", em_address, length, (int)numbers.size());

	for (int block_num : numbers) {
		auto block = blocks_.GetBlock(block_num);
		// TODO: We are invalidating a lot of blocks that are already invalid (yu gi oh).
		// INFO_LOG(Log::JIT, "Block at %08x invalidated: valid: %d", block->GetOriginalStart(), block->IsValid());
		// If we're a native JIT (IR->JIT, not just IR interpreter), we write native offsets into the blocks.
		int cookie = compileToNative_ ? block->GetNativeOffset() : block->GetIRArenaOffset();
		blocks_.RemoveBlockFromPageLookup(block_num);
		block->Destroy(cookie);
	}
}

void IRJit::Compile(u32 em_address) {
	_dbg_assert_(compilerEnabled_);

	PROFILE_THIS_SCOPE("jitc");

	if (g_Config.bPreloadFunctions) {
		// Look to see if we've preloaded this block.
		int block_num = blocks_.FindPreloadBlock(em_address);
		if (block_num != -1) {
			IRBlock *block = blocks_.GetBlock(block_num);
			// Okay, let's link and finalize the block now.
			int cookie = compileToNative_ ? block->GetNativeOffset() : block->GetIRArenaOffset();
			block->Finalize(cookie);
			if (block->IsValid()) {
				// Success, we're done.
				FinalizeNativeBlock(&blocks_, block_num);
				return;
			}
		}
	}

	std::vector<IRInst> instructions;
	u32 mipsBytes;
	if (!CompileBlock(em_address, instructions, mipsBytes, false)) {
		// Ran out of block numbers - need to reset.
		ERROR_LOG(Log::JIT, "Ran out of block numbers, clearing cache");
		ClearCache();
		CompileBlock(em_address, instructions, mipsBytes, false);
	}

	if (frontend_.CheckRounding(em_address)) {
		// Our assumptions are all wrong so it's clean-slate time.
		ClearCache();
		CompileBlock(em_address, instructions, mipsBytes, false);
	}
}

// WARNING! This can be called from IRInterpret / the JIT, through the function preload stuff!
bool IRJit::CompileBlock(u32 em_address, std::vector<IRInst> &instructions, u32 &mipsBytes, bool preload) {
	_dbg_assert_(compilerEnabled_);

	frontend_.DoJit(em_address, instructions, mipsBytes, preload);
	if (instructions.empty()) {
		_dbg_assert_(preload);
		// We return true when preloading so it doesn't abort.
		return preload;
	}

	int block_num = blocks_.AllocateBlock(em_address, mipsBytes, instructions);
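	// A negative block number (allocation failed) fails the check below, as does one too large
	// to fit in an emuhack payload.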
	if ((block_num & ~MIPS_EMUHACK_VALUE_MASK) != 0) {
		WARN_LOG(Log::JIT, "Failed to allocate block for %08x (%d instructions)", em_address, (int)instructions.size());
		// Out of block numbers. Caller will handle.
		return false;
	}

	IRBlock *b = blocks_.GetBlock(block_num);
	if (preload || mipsTracer.tracing_enabled) {
		// Hash, then only update page stats, don't link yet.
		// TODO: Should we always hash? Then we can reuse blocks.
		b->UpdateHash();
	}

	if (!CompileNativeBlock(&blocks_, block_num, preload))
		return false;

	if (mipsTracer.tracing_enabled) {
		mipsTracer.prepare_block(b, blocks_);
	}

	// Updates stats, also patches the first MIPS instruction into an emuhack if 'preload == false'
	blocks_.FinalizeBlock(block_num, preload);
	if (!preload)
		FinalizeNativeBlock(&blocks_, block_num);
	return true;
}

void IRJit::CompileFunction(u32 start_address, u32 length) {
	_dbg_assert_(compilerEnabled_);

	PROFILE_THIS_SCOPE("jitc");

	// Note: we don't actually write emuhacks yet, so we can validate hashes.
	// This way, if the game changes the code afterward, we'll catch even without icache invalidation.

	// We may go up and down from branches, so track all block starts done here.
	std::set<u32> doneAddresses;
	std::vector<u32> pendingAddresses;
	pendingAddresses.reserve(16);
	pendingAddresses.push_back(start_address);
	while (!pendingAddresses.empty()) {
		u32 em_address = pendingAddresses.back();
		pendingAddresses.pop_back();

		// To be safe, also check if a real block is there. This can be a runtime module load.
		u32 inst = Memory::ReadUnchecked_U32(em_address);
		if (MIPS_IS_RUNBLOCK(inst) || doneAddresses.find(em_address) != doneAddresses.end()) {
			// Already compiled this address.
			continue;
		}

		std::vector<IRInst> instructions;
		u32 mipsBytes;
		if (!CompileBlock(em_address, instructions, mipsBytes, true)) {
			// Ran out of block numbers - let's hope there's no more code it needs to run.
			// Will flush when actually compiling.
			ERROR_LOG(Log::JIT, "Ran out of block numbers while compiling function");
			return;
		}

		doneAddresses.insert(em_address);

		for (const IRInst &inst : instructions) {
			u32 exit = 0;

			switch (inst.op) {
			case IROp::ExitToConst:
			case IROp::ExitToConstIfEq:
			case IROp::ExitToConstIfNeq:
			case IROp::ExitToConstIfGtZ:
			case IROp::ExitToConstIfGeZ:
			case IROp::ExitToConstIfLtZ:
			case IROp::ExitToConstIfLeZ:
			case IROp::ExitToConstIfFpTrue:
			case IROp::ExitToConstIfFpFalse:
				exit = inst.constant;
				break;

			case IROp::ExitToPC:
			case IROp::Break:
				// Don't add any, we'll do block end anyway (for jal, etc.)
				exit = 0;
				break;

			default:
				exit = 0;
				break;
			}

			// Only follow jumps internal to the function.
			if (exit != 0 && exit >= start_address && exit < start_address + length) {
				// Even if it's a duplicate, we check at loop start.
				pendingAddresses.push_back(exit);
			}
		}

		// Also include after the block for jal returns.
		if (em_address + mipsBytes < start_address + length) {
			pendingAddresses.push_back(em_address + mipsBytes);
		}
	}
}

void IRJit::RunLoopUntil(u64 globalticks) {
	PROFILE_THIS_SCOPE("jit");

	// ApplyRoundingMode(true);
	// IR Dispatcher

	while (true) {
		// RestoreRoundingMode(true);
		CoreTiming::Advance();
		// ApplyRoundingMode(true);
		if (coreState != 0) {
			break;
		}

		MIPSState *mips = mips_;
#ifdef _DEBUG
		compilerEnabled_ = false;
#endif
		while (mips->downcount >= 0) {
			u32 inst = Memory::ReadUnchecked_U32(mips->pc);
			u32 opcode = inst & 0xFF000000;
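			// A compiled block's first MIPS op has been replaced with an emuhack (see IRBlock::Finalize);
			// here its low 24 bits give the block's offset into the IR instruction arena.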
			if (opcode == MIPS_EMUHACK_OPCODE) {
				u32 offset = inst & 0x00FFFFFF; // Alternatively, inst - opcode
				const IRInst *instPtr = blocks_.GetArenaPtr() + offset;
				// The first op is normally a Downcount (except when breakpoints are in use), handled here to save one dispatch inside IRInterpret.
				// This branch is very cpu-branch-predictor-friendly so this still beats the dispatch.
				if (instPtr->op == IROp::Downcount) {
					mips->downcount -= instPtr->constant;
					instPtr++;
				}
#ifdef IR_PROFILING
				IRBlock *block = blocks_.GetBlock(blocks_.GetBlockNumFromOffset(offset));
				Instant start = Instant::Now();
				mips->pc = IRInterpret(mips, instPtr);
				int64_t elapsedNanos = start.ElapsedNanos();
				block->profileStats_.executions += 1;
				block->profileStats_.totalNanos += elapsedNanos;
#else
				mips->pc = IRInterpret(mips, instPtr);
#endif
				// Note: this will "jump to zero" on a badly constructed block missing exits.
				if (!Memory::IsValid4AlignedAddress(mips->pc)) {
					int blockNum = blocks_.GetBlockNumFromIRArenaOffset(offset);
					IRBlock *block = blocks_.GetBlockUnchecked(blockNum);
					Core_ExecException(mips->pc, block->GetOriginalStart(), ExecExceptionType::JUMP);
					break;
				}
			} else {
				// RestoreRoundingMode(true);
#ifdef _DEBUG
				compilerEnabled_ = true;
#endif
				Compile(mips->pc);
#ifdef _DEBUG
				compilerEnabled_ = false;
#endif
				// ApplyRoundingMode(true);
			}
		}
#ifdef _DEBUG
		compilerEnabled_ = true;
#endif
	}

	// RestoreRoundingMode(true);
}

bool IRJit::DescribeCodePtr(const u8 *ptr, std::string &name) {
	// Used in native disassembly viewer.
	return false;
}

void IRJit::LinkBlock(u8 *exitPoint, const u8 *checkedEntry) {
	Crash();
}

void IRJit::UnlinkBlock(u8 *checkedEntry, u32 originalAddress) {
	Crash();
}

void IRBlockCache::Clear() {
	for (int i = 0; i < (int)blocks_.size(); ++i) {
		int cookie = compileToNative_ ? blocks_[i].GetNativeOffset() : blocks_[i].GetIRArenaOffset();
		blocks_[i].Destroy(cookie);
	}
	blocks_.clear();
	byPage_.clear();
	arena_.clear();
	arena_.shrink_to_fit();
}

IRBlockCache::IRBlockCache(bool compileToNative) : compileToNative_(compileToNative) {}

int IRBlockCache::AllocateBlock(int emAddr, u32 origSize, const std::vector<IRInst> &insts) {
	// We have 24 bits to represent offsets with.
	const u32 MAX_ARENA_SIZE = 0x1000000 - 1;
	int offset = (int)arena_.size();
	if (offset >= MAX_ARENA_SIZE) {
		WARN_LOG(Log::JIT, "Filled JIT arena, restarting");
		return -1;
	}
	// TODO: Use memcpy.
	for (int i = 0; i < insts.size(); i++) {
		arena_.push_back(insts[i]);
	}
	int newBlockIndex = (int)blocks_.size();
	blocks_.push_back(IRBlock(emAddr, origSize, offset, (u32)insts.size()));
	return newBlockIndex;
}

int IRBlockCache::GetBlockNumFromIRArenaOffset(int offset) const {
	// Block offsets are always in rising order (we don't go back and replace them when invalidated). So we can binary search.
	int low = 0;
	int high = (int)blocks_.size() - 1;
	int found = -1;
	while (low <= high) {
		int mid = low + (high - low) / 2;
		const int blockOffset = blocks_[mid].GetIRArenaOffset();
		if (blockOffset == offset) {
			found = mid;
			break;
		}
		if (blockOffset < offset) {
			low = mid + 1;
		} else {
			high = mid - 1;
		}
	}

#ifndef _DEBUG
	// Then, in debug builds, cross check the result.
	return found;
#else
	// TODO: Optimize if we need to call this often.
	for (int i = 0; i < (int)blocks_.size(); i++) {
		if (blocks_[i].GetIRArenaOffset() == offset) {
			_dbg_assert_(i == found);
			return i;
		}
	}
#endif
	_dbg_assert_(found == -1);
	return -1;
}

std::vector<int> IRBlockCache::FindInvalidatedBlockNumbers(u32 address, u32 lengthInBytes) {
	u32 startPage = AddressToPage(address);
	u32 endPage = AddressToPage(address + lengthInBytes);

	std::vector<int> found;
	for (u32 page = startPage; page <= endPage; ++page) {
		const auto iter = byPage_.find(page);
		if (iter == byPage_.end())
			continue;

		const std::vector<int> &blocksInPage = iter->second;
		for (int i : blocksInPage) {
			if (blocks_[i].OverlapsRange(address, lengthInBytes)) {
				// We now try to remove these during invalidation.
				found.push_back(i);
			}
		}
	}

	return found;
}

void IRBlockCache::FinalizeBlock(int blockIndex, bool preload) {
	// TODO: What's different about preload blocks?
	IRBlock &block = blocks_[blockIndex];
	if (!preload) {
		int cookie = compileToNative_ ? block.GetNativeOffset() : block.GetIRArenaOffset();
		block.Finalize(cookie);
	}

	u32 startAddr, size;
	block.GetRange(&startAddr, &size);

	u32 startPage = AddressToPage(startAddr);
	u32 endPage = AddressToPage(startAddr + size);

	for (u32 page = startPage; page <= endPage; ++page) {
		byPage_[page].push_back(blockIndex);
	}
}

// Call after Destroy-ing it.
void IRBlockCache::RemoveBlockFromPageLookup(int blockIndex) {
	// We need to remove the block from the byPage lookup.
	IRBlock &block = blocks_[blockIndex];

	u32 startAddr, size;
	block.GetRange(&startAddr, &size);

	u32 startPage = AddressToPage(startAddr);
	u32 endPage = AddressToPage(startAddr + size);

	for (u32 page = startPage; page <= endPage; ++page) {
		auto iter = std::find(byPage_[page].begin(), byPage_[page].end(), blockIndex);
		if (iter != byPage_[page].end()) {
			byPage_[page].erase(iter);
		} else if (block.IsValid()) {
			// If it was previously invalidated, we don't care, hence the above check.
			WARN_LOG(Log::JIT, "RemoveBlock: Block at %08x was not found where expected in byPage table.", startAddr);
		}
	}

	// Additionally, we'd like to zap the block in the IR arena.
	// However, this breaks if calling sceKernelIcacheClearAll(), since as soon as we return, we'll be executing garbage.
	/*
	IRInst bad{ IROp::Bad };
	for (int off = block.GetIRArenaOffset(); off < (int)(block.GetIRArenaOffset() + block.GetNumIRInstructions()); off++) {
		arena_[off] = bad;
	}
	*/
}

u32 IRBlockCache::AddressToPage(u32 addr) const {
	// Use relatively small pages since basic blocks are typically small.
	return (addr & 0x3FFFFFFF) >> 10;
}

int IRBlockCache::FindPreloadBlock(u32 em_address) {
	u32 page = AddressToPage(em_address);
	auto iter = byPage_.find(page);
	if (iter == byPage_.end())
		return -1;

	const std::vector<int> &blocksInPage = iter->second;
	for (int i : blocksInPage) {
		if (blocks_[i].GetOriginalStart() == em_address) {
			if (blocks_[i].HashMatches()) {
				return i;
			}
		}
	}

	return -1;
}

int IRBlockCache::FindByCookie(int cookie) {
	if (blocks_.empty())
		return -1;

	// TODO: Maybe a flag to determine native offset mode?
	if (!compileToNative_) {
		return GetBlockNumFromIRArenaOffset(cookie);
	}

	// TODO: This could also use a binary search.
	for (int i = 0; i < GetNumBlocks(); ++i) {
		int offset = blocks_[i].GetNativeOffset();
		if (offset == cookie)
			return i;
	}
	return -1;
}
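// Temporarily restores each block's original first MIPS op so raw memory can be read or saved
// without emuhacks in it, and records which blocks were patched so RestoreSavedEmuHackOps()
// can re-apply them afterwards.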
std::vector<u32> IRBlockCache::SaveAndClearEmuHackOps() {
	std::vector<u32> result;
	result.resize(blocks_.size());

	for (int number = 0; number < (int)blocks_.size(); ++number) {
		IRBlock &b = blocks_[number];
		int cookie = compileToNative_ ? b.GetNativeOffset() : b.GetIRArenaOffset();
		if (b.IsValid() && b.RestoreOriginalFirstOp(cookie)) {
			result[number] = number;
		} else {
			result[number] = 0;
		}
	}

	return result;
}

void IRBlockCache::RestoreSavedEmuHackOps(const std::vector<u32> &saved) {
	if ((int)blocks_.size() != (int)saved.size()) {
		ERROR_LOG(Log::JIT, "RestoreSavedEmuHackOps: Wrong saved block size.");
		return;
	}

	for (int number = 0; number < (int)blocks_.size(); ++number) {
		IRBlock &b = blocks_[number];
		// Only if we restored it, write it back.
		if (b.IsValid() && saved[number] != 0 && b.HasOriginalFirstOp()) {
			int cookie = compileToNative_ ? b.GetNativeOffset() : b.GetIRArenaOffset();
			b.Finalize(cookie);
		}
	}
}

JitBlockDebugInfo IRBlockCache::GetBlockDebugInfo(int blockNum) const {
	const IRBlock &ir = blocks_[blockNum];
	JitBlockDebugInfo debugInfo{};
	uint32_t start, size;
	ir.GetRange(&start, &size);
	debugInfo.originalAddress = start; // TODO

	debugInfo.origDisasm.reserve(((start + size) - start) / 4);
	for (u32 addr = start; addr < start + size; addr += 4) {
		char temp[256];
		MIPSDisAsm(Memory::Read_Instruction(addr), addr, temp, sizeof(temp), true);
		std::string mipsDis = temp;
		debugInfo.origDisasm.push_back(mipsDis);
	}

	debugInfo.irDisasm.reserve(ir.GetNumIRInstructions());
	const IRInst *instructions = GetBlockInstructionPtr(ir);
	for (int i = 0; i < ir.GetNumIRInstructions(); i++) {
		IRInst inst = instructions[i];
		char buffer[256];
		DisassembleIR(buffer, sizeof(buffer), inst);
		debugInfo.irDisasm.push_back(buffer);
	}
	return debugInfo;
}

void IRBlockCache::ComputeStats(BlockCacheStats &bcStats) const {
	double totalBloat = 0.0;
	double maxBloat = 0.0;
	double minBloat = 1000000000.0;
	for (const auto &b : blocks_) {
		double codeSize = (double)b.GetNumIRInstructions() * 4; // We count bloat in instructions, not bytes. sizeof(IRInst);
		if (codeSize == 0)
			continue;
		u32 origAddr, mipsBytes;
		b.GetRange(&origAddr, &mipsBytes);
		double origSize = (double)mipsBytes;
		double bloat = codeSize / origSize;
		if (bloat < minBloat) {
			minBloat = bloat;
			bcStats.minBloatBlock = origAddr;
		}
		if (bloat > maxBloat) {
			maxBloat = bloat;
			bcStats.maxBloatBlock = origAddr;
		}
		totalBloat += bloat;
	}
	bcStats.numBlocks = (int)blocks_.size();
	bcStats.minBloat = minBloat;
	bcStats.maxBloat = maxBloat;
	bcStats.avgBloat = totalBloat / (double)blocks_.size();
}

int IRBlockCache::GetBlockNumberFromStartAddress(u32 em_address, bool realBlocksOnly) const {
	u32 page = AddressToPage(em_address);

	const auto iter = byPage_.find(page);
	if (iter == byPage_.end())
		return -1;

	const std::vector<int> &blocksInPage = iter->second;
	int best = -1;
	for (int i : blocksInPage) {
		if (blocks_[i].GetOriginalStart() == em_address) {
			best = i;
			if (blocks_[i].IsValid()) {
				return i;
			}
		}
	}
	return best;
}

bool IRBlock::HasOriginalFirstOp() const {
	return Memory::ReadUnchecked_U32(origAddr_) == origFirstOpcode_.encoding;
}

bool IRBlock::RestoreOriginalFirstOp(int cookie) {
	const u32 emuhack = MIPS_EMUHACK_OPCODE | cookie;
	if (Memory::ReadUnchecked_U32(origAddr_) == emuhack) {
		Memory::Write_Opcode_JIT(origAddr_, origFirstOpcode_);
		return true;
	}
	return false;
}

void IRBlock::Finalize(int cookie) {
	// Check it wasn't invalidated, in case this is after preload.
	// TODO: Allow reusing blocks when the code matches hash_ again, instead.
	if (origAddr_) {
		origFirstOpcode_ = Memory::Read_Opcode_JIT(origAddr_);
		MIPSOpcode opcode = MIPSOpcode(MIPS_EMUHACK_OPCODE | cookie);
		Memory::Write_Opcode_JIT(origAddr_, opcode);
	} else {
		WARN_LOG(Log::JIT, "Finalizing invalid block (cookie: %d)", cookie);
	}
}

void IRBlock::Destroy(int cookie) {
	if (origAddr_) {
		MIPSOpcode opcode = MIPSOpcode(MIPS_EMUHACK_OPCODE | cookie);
		u32 memOp = Memory::ReadUnchecked_U32(origAddr_);
		if (memOp == opcode.encoding) {
			Memory::Write_Opcode_JIT(origAddr_, origFirstOpcode_);
		} else {
			// NOTE: This is not an error. Just interesting to log.
			DEBUG_LOG(Log::JIT, "IRBlock::Destroy: Note: Block at %08x was overwritten - checked for %08x, got %08x when restoring the MIPS op to %08x", origAddr_, opcode.encoding, memOp, origFirstOpcode_.encoding);
		}
		// TODO: Also wipe the block in the IR opcode arena.
		// Let's mark this invalid so we don't try to clear it again.
		origAddr_ = 0;
	}
}

u64 IRBlock::CalculateHash() const {
	if (origAddr_) {
		// This is unfortunate. In case there are emuhacks, we have to make a copy.
		// If we could hash while reading we could avoid this.
		std::vector<u32> buffer;
		buffer.resize(origSize_ / 4);
		size_t pos = 0;
		for (u32 off = 0; off < origSize_; off += 4) {
			// Let's actually hash the replacement, if any.
			MIPSOpcode instr = Memory::ReadUnchecked_Instruction(origAddr_ + off, false);
			buffer[pos++] = instr.encoding;
		}
		return XXH3_64bits(&buffer[0], origSize_);
	}
	return 0;
}

bool IRBlock::OverlapsRange(u32 addr, u32 size) const {
	addr &= 0x3FFFFFFF;
	u32 origAddr = origAddr_ & 0x3FFFFFFF;
	return addr + size > origAddr && addr < origAddr + origSize_;
}

MIPSOpcode IRJit::GetOriginalOp(MIPSOpcode op) {
	IRBlock *b = blocks_.GetBlock(blocks_.FindByCookie(op.encoding & 0xFFFFFF));
	if (b) {
		return b->GetOriginalFirstOp();
	}
	return op;
}

} // namespace MIPSComp