Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
stenzek
GitHub Repository: stenzek/duckstation
Path: blob/master/src/core/cpu_core.cpp
7328 views
1
// SPDX-FileCopyrightText: 2019-2025 Connor McLaughlin <[email protected]>
2
// SPDX-License-Identifier: CC-BY-NC-ND-4.0
3
4
#include "cpu_core.h"
#include "bus.h"
#include "cpu_code_cache_private.h"
#include "cpu_core_private.h"
#include "cpu_disasm.h"
#include "cpu_pgxp.h"
#include "gte.h"
#include "host.h"
#include "pcdrv.h"
#include "pio.h"
#include "settings.h"
#include "system.h"
#include "timing_event.h"

#include "util/state_wrapper.h"

#include "common/align.h"
#include "common/fastjmp.h"
#include "common/file_system.h"
#include "common/log.h"
#include "common/path.h"

#include "fmt/format.h"

#include <algorithm>
#include <cstdarg>
#include <cstdio>
29
30
LOG_CHANNEL(CPU);
31
32
namespace CPU {
33
// Reason the dispatcher should interrupt normal execution (stored in Locals::break_type).
enum class ExecutionBreakType
{
  None,
  ExecuteOneInstruction,
  SingleStep,
  Breakpoint,
};
40
41
static constexpr u32 INVALID_BREAKPOINT_PC = UINT32_C(0xFFFFFFFF);
42
43
static void UpdateLoadDelay();
44
static void Branch(u32 target);
45
static void FlushLoadDelay();
46
static void FlushPipeline();
47
48
static u32 GetExceptionVector(bool debug_exception = false);
49
static void RaiseException(u32 CAUSE_bits, u32 EPC, u32 vector);
50
static void RaiseDataBusException();
51
52
static u32 ReadReg(Reg rs);
53
static void WriteReg(Reg rd, u32 value);
54
static void WriteRegDelayed(Reg rd, u32 value);
55
56
static void DispatchCop0Breakpoint(bool data);
57
static bool IsCop0ExecutionBreakpointUnmasked();
58
static bool Cop0ExecutionBreakpointCheck(u32 pc);
59
template<MemoryAccessType type>
60
static void Cop0DataBreakpointCheck(VirtualMemoryAddress address);
61
62
static BreakpointList& GetBreakpointList(BreakpointType type);
63
static bool CheckBreakpointList(BreakpointType type, VirtualMemoryAddress address);
64
static void ExecutionBreakpointCheck(u32 pc);
65
template<MemoryAccessType type>
66
static void MemoryBreakpointCheck(VirtualMemoryAddress address);
67
68
#ifdef _DEBUG
69
static void TracePrintInstruction();
70
#endif
71
72
static void DisassembleAndPrint(u32 addr, bool regs, const char* prefix);
73
static void PrintInstruction(u32 bits, u32 pc, bool regs, const char* prefix);
74
static void LogInstruction(u32 bits, u32 pc, bool regs);
75
76
static void HandleWriteSyscall();
77
static void HandlePutcSyscall();
78
static void HandlePutsSyscall();
79
80
static void CheckForExecutionModeChange();
81
[[noreturn]] static void ExecuteInterpreter();
82
83
template<PGXPMode pgxp_mode, bool debug>
84
static void ExecuteInstruction();
85
86
template<PGXPMode pgxp_mode, bool debug>
87
[[noreturn]] static void ExecuteImpl();
88
89
static bool FetchInstruction();
90
static bool FetchInstructionForInterpreterFallback();
91
template<bool add_ticks, bool icache_read = false, u32 word_count = 1, bool raise_exceptions>
92
static bool DoInstructionRead(PhysicalMemoryAddress address, u32* data);
93
template<MemoryAccessType type, MemoryAccessSize size>
94
static bool DoSafeMemoryAccess(VirtualMemoryAddress address, u32& value);
95
template<MemoryAccessType type, MemoryAccessSize size>
96
static bool DoAlignmentCheck(VirtualMemoryAddress address);
97
static bool ReadMemoryByte(VirtualMemoryAddress addr, u8* value);
98
static bool ReadMemoryHalfWord(VirtualMemoryAddress addr, u16* value);
99
static bool ReadMemoryWord(VirtualMemoryAddress addr, u32* value);
100
static bool WriteMemoryByte(VirtualMemoryAddress addr, u32 value);
101
static bool WriteMemoryHalfWord(VirtualMemoryAddress addr, u32 value);
102
static bool WriteMemoryWord(VirtualMemoryAddress addr, u32 value);
103
104
// Interpreter-local bookkeeping that is not part of the save state.
struct Locals
{
  ExecutionBreakType break_type = ExecutionBreakType::None;
  u32 breakpoint_counter = 1;
  u32 last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;
  CPUExecutionMode current_execution_mode = CPUExecutionMode::Interpreter;
  std::array<std::vector<Breakpoint>, static_cast<u32>(BreakpointType::Count)> breakpoints;

  // Execution-trace log ("cpu_log.txt"); opened lazily on first write.
  std::FILE* log_file = nullptr;
  bool log_file_opened = false; // true once an open was attempted, even if it failed
  bool trace_to_log = false;    // instruction tracing to the log file is enabled

  fastjmp_buf exit_jmp_buf;
};
118
119
ALIGN_TO_CACHE_LINE constinit State g_state;
120
ALIGN_TO_CACHE_LINE static Locals s_locals;
121
122
#ifdef _DEBUG
123
static bool TRACE_EXECUTION = false;
124
#endif
125
126
} // namespace CPU
127
128
bool CPU::IsTraceEnabled()
129
{
130
return s_locals.trace_to_log;
131
}
132
133
// Enables instruction tracing to the execution log. No-op if already tracing.
void CPU::StartTrace()
{
  if (s_locals.trace_to_log)
    return;

  s_locals.trace_to_log = true;

  // Switching to the debug dispatcher requires breaking out of the current execution loop.
  if (UpdateDebugDispatcherFlag())
    System::InterruptExecution();
}
142
143
void CPU::StopTrace()
144
{
145
if (!s_locals.trace_to_log)
146
return;
147
148
if (s_locals.log_file)
149
std::fclose(s_locals.log_file);
150
151
s_locals.log_file_opened = false;
152
s_locals.trace_to_log = false;
153
if (UpdateDebugDispatcherFlag())
154
System::InterruptExecution();
155
}
156
157
// Appends a printf-style formatted message to the execution log, lazily
// opening "cpu_log.txt" in the data directory on first use.
void CPU::WriteToExecutionLog(const char* format, ...)
{
  if (!s_locals.log_file_opened) [[unlikely]]
  {
    s_locals.log_file = FileSystem::OpenCFile(Path::Combine(EmuFolders::DataRoot, "cpu_log.txt").c_str(), "wb");

    // Mark the open as attempted even on failure, so we don't retry on every call.
    s_locals.log_file_opened = true;
  }

  if (s_locals.log_file)
  {
    std::va_list ap;
    va_start(ap, format);
    std::vfprintf(s_locals.log_file, format, ap);
    va_end(ap);

#ifdef _DEBUG
    // Flush per message in debug builds so the log survives a crash.
    std::fflush(s_locals.log_file);
#endif
  }
}
177
178
// One-time initialization of CPU state and interpreter locals at system startup.
void CPU::Initialize()
{
  // From nocash spec.
  g_state.cop0_regs.PRID = UINT32_C(0x00000002);

  s_locals.current_execution_mode = g_settings.cpu_execution_mode;
  g_state.using_debug_dispatcher = false;
  g_state.using_interpreter = (s_locals.current_execution_mode == CPUExecutionMode::Interpreter);

  // Start with a clean breakpoint/trace state.
  for (BreakpointList& bps : s_locals.breakpoints)
    bps.clear();
  s_locals.breakpoint_counter = 1;
  s_locals.last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;
  s_locals.break_type = ExecutionBreakType::None;

  UpdateMemoryPointers();
  UpdateDebugDispatcherFlag();
}
195
196
// Tears down debug state (breakpoints and tracing) at system shutdown.
void CPU::Shutdown()
{
  ClearBreakpoints();
  StopTrace();
}
201
202
// Emulates a hard reset: clears the GPRs and COP0 registers, invalidates the
// instruction cache, resets the GTE (and PGXP when enabled), then jumps to
// the reset vector.
void CPU::Reset()
{
  g_state.exception_raised = false;
  g_state.bus_error = false;

  g_state.regs = {};

  g_state.cop0_regs.BPC = 0;
  g_state.cop0_regs.BDA = 0;
  g_state.cop0_regs.TAR = 0;
  g_state.cop0_regs.BadVaddr = 0;
  g_state.cop0_regs.BDAM = 0;
  g_state.cop0_regs.BPCM = 0;
  g_state.cop0_regs.EPC = 0;
  g_state.cop0_regs.dcic.bits = 0;
  g_state.cop0_regs.sr.bits = 0;
  g_state.cop0_regs.cause.bits = 0;

  ClearICache();
  UpdateMemoryPointers();
  UpdateDebugDispatcherFlag();

  GTE::Reset();

  if (g_settings.gpu_pgxp_enable)
    PGXP::Reset();

  // This consumes cycles, so do it first.
  SetPC(RESET_VECTOR);

  // Tick counters are reset after SetPC() so the fetch above doesn't leak into them.
  g_state.downcount = 0;
  g_state.pending_ticks = 0;
  g_state.gte_completion_tick = 0;
  g_state.muldiv_completion_tick = 0;
}
237
238
// Serializes/deserializes the CPU state to/from a save state, including
// compatibility handling for older state versions. Returns false on error.
//
// Fix: the old-state (< v59) sanitization clamped next_load_delay_reg using
// load_delay_reg (copy-paste error); it now clamps next_load_delay_reg itself.
bool CPU::DoState(StateWrapper& sw)
{
  sw.Do(&g_state.pending_ticks);
  sw.Do(&g_state.downcount);
  sw.DoEx(&g_state.gte_completion_tick, 78, static_cast<u32>(0));
  sw.DoEx(&g_state.muldiv_completion_tick, 80, static_cast<u32>(0));
  sw.DoArray(g_state.regs.r, static_cast<u32>(Reg::count));
  sw.Do(&g_state.pc);
  sw.Do(&g_state.npc);
  sw.Do(&g_state.cop0_regs.BPC);
  sw.Do(&g_state.cop0_regs.BDA);
  sw.Do(&g_state.cop0_regs.TAR);
  sw.Do(&g_state.cop0_regs.BadVaddr);
  sw.Do(&g_state.cop0_regs.BDAM);
  sw.Do(&g_state.cop0_regs.BPCM);
  sw.Do(&g_state.cop0_regs.EPC);
  sw.Do(&g_state.cop0_regs.PRID);
  sw.Do(&g_state.cop0_regs.sr.bits);
  sw.Do(&g_state.cop0_regs.cause.bits);
  sw.Do(&g_state.cop0_regs.dcic.bits);
  sw.Do(&g_state.next_instruction.bits);
  sw.Do(&g_state.current_instruction.bits);
  sw.Do(&g_state.current_instruction_pc);
  sw.Do(&g_state.current_instruction_in_branch_delay_slot);
  sw.Do(&g_state.current_instruction_was_branch_taken);
  sw.Do(&g_state.next_instruction_is_branch_delay_slot);
  sw.Do(&g_state.branch_was_taken);
  sw.Do(&g_state.exception_raised);
  sw.DoEx(&g_state.bus_error, 61, false);

  // Versions before 59 stored an interrupt-delay flag; consume and discard it.
  if (sw.GetVersion() < 59) [[unlikely]]
  {
    bool interrupt_delay;
    sw.Do(&interrupt_delay);
  }

  sw.Do(&g_state.load_delay_reg);
  sw.Do(&g_state.load_delay_value);
  sw.Do(&g_state.next_load_delay_reg);
  sw.Do(&g_state.next_load_delay_value);

  // Compatibility with old states: clamp out-of-range delay registers to Reg::count ("no load pending").
  if (sw.GetVersion() < 59) [[unlikely]]
  {
    g_state.load_delay_reg =
      static_cast<Reg>(std::min(static_cast<u8>(g_state.load_delay_reg), static_cast<u8>(Reg::count)));
    g_state.next_load_delay_reg =
      static_cast<Reg>(std::min(static_cast<u8>(g_state.next_load_delay_reg), static_cast<u8>(Reg::count)));
  }

  sw.Do(&g_state.cache_control.bits);
  sw.DoBytes(g_state.scratchpad.data(), g_state.scratchpad.size());

  if (!GTE::DoState(sw)) [[unlikely]]
    return false;

  // ICache contents were not serialized before version 48; just invalidate it.
  if (sw.GetVersion() < 48) [[unlikely]]
  {
    DebugAssert(sw.IsReading());
    ClearICache();
  }
  else
  {
    sw.Do(&g_state.icache_tags);
    sw.Do(&g_state.icache_data);
  }

  sw.DoEx(&g_state.using_interpreter, 67, g_state.using_interpreter);

  if (sw.IsReading())
  {
    // Trigger an execution mode change if the state was/wasn't using the interpreter.
    s_locals.current_execution_mode =
      g_state.using_interpreter ?
        CPUExecutionMode::Interpreter :
        ((g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter) ? CPUExecutionMode::CachedInterpreter :
                                                                            g_settings.cpu_execution_mode);
    g_state.gte_completion_tick = 0;
    g_state.muldiv_completion_tick = 0;
    UpdateMemoryPointers();
    UpdateDebugDispatcherFlag();
  }

  return !sw.HasError();
}
321
322
// Redirects execution to 'new_pc' (must be word-aligned) and flushes the
// pipeline so the instruction there is fetched immediately.
void CPU::SetPC(u32 new_pc)
{
  DebugAssert(Common::IsAlignedPow2(new_pc, 4));
  g_state.npc = new_pc;
  FlushPipeline();
}
328
329
// Takes a branch to 'target'. A misaligned target raises an AdEL exception
// with the fault address in BadVaddr/EPC instead of branching.
ALWAYS_INLINE_RELEASE void CPU::Branch(u32 target)
{
  if (!Common::IsAlignedPow2(target, 4))
  {
    // The BadVaddr and EPC must be set to the fetching address, not the instruction about to execute.
    g_state.cop0_regs.BadVaddr = target;
    RaiseException(Cop0Registers::CAUSE::MakeValueForException(Exception::AdEL, false, false, 0), target);
    return;
  }

  g_state.npc = target;
  g_state.branch_was_taken = true;
}
342
343
// Computes the exception vector address: SR.BEV selects the ROM vs RAM base,
// and COP0 debug exceptions use offset 0x40 instead of the usual 0x80.
ALWAYS_INLINE_RELEASE u32 CPU::GetExceptionVector(bool debug_exception /* = false*/)
{
  const u32 offset = debug_exception ? UINT32_C(0x00000040) : UINT32_C(0x00000080);
  if (g_state.cop0_regs.sr.BEV)
    return UINT32_C(0xbfc00100) | offset;
  else
    return UINT32_C(0x80000000) | offset;
}
348
349
// Core exception dispatch: latches EPC and the writable CAUSE fields, adjusts
// EPC/TAR for branch-delay-slot victims, pushes the COP0 mode stack (entering
// kernel mode with interrupts disabled), and redirects execution to 'vector'.
ALWAYS_INLINE_RELEASE void CPU::RaiseException(u32 CAUSE_bits, u32 EPC, u32 vector)
{
  g_state.cop0_regs.EPC = EPC;

  // Only the architecturally-writable CAUSE bits are taken from CAUSE_bits.
  g_state.cop0_regs.cause.bits = (g_state.cop0_regs.cause.bits & ~Cop0Registers::CAUSE::EXCEPTION_WRITE_MASK) |
                                 (CAUSE_bits & Cop0Registers::CAUSE::EXCEPTION_WRITE_MASK);

#if defined(_DEBUG) || defined(_DEVEL)
  // Log everything except the routine interrupt/syscall/break exceptions.
  if (g_state.cop0_regs.cause.Excode != Exception::INT && g_state.cop0_regs.cause.Excode != Exception::Syscall &&
      g_state.cop0_regs.cause.Excode != Exception::BP)
  {
    DEV_LOG("Exception {} at 0x{:08X} (epc=0x{:08X}, BD={}, CE={})",
            static_cast<u8>(g_state.cop0_regs.cause.Excode.GetValue()), g_state.current_instruction_pc,
            g_state.cop0_regs.EPC, g_state.cop0_regs.cause.BD ? "true" : "false",
            g_state.cop0_regs.cause.CE.GetValue());
    DisassembleAndPrint(g_state.current_instruction_pc, 4u, 0u);
    if (s_locals.trace_to_log)
    {
      CPU::WriteToExecutionLog("Exception %u at 0x%08X (epc=0x%08X, BD=%s, CE=%u)\n",
                               static_cast<u8>(g_state.cop0_regs.cause.Excode.GetValue()),
                               g_state.current_instruction_pc, g_state.cop0_regs.EPC,
                               g_state.cop0_regs.cause.BD ? "true" : "false", g_state.cop0_regs.cause.CE.GetValue());
    }
  }
#endif

  if (g_state.cop0_regs.cause.BD)
  {
    // TAR is set to the address which was being fetched in this instruction, or the next instruction to execute if the
    // exception hadn't occurred in the delay slot.
    g_state.cop0_regs.EPC -= UINT32_C(4);
    g_state.cop0_regs.TAR = g_state.pc;
  }

  // current -> previous, switch to kernel mode and disable interrupts
  g_state.cop0_regs.sr.mode_bits <<= 2;

  // flush the pipeline - we don't want to execute the previously fetched instruction
  g_state.npc = vector;
  g_state.exception_raised = true;
  FlushPipeline();
}
390
391
// Raises a BP exception through the debug vector for a COP0 hardware
// breakpoint hit. 'data' selects a data (BDA) vs execution (BPC) breakpoint;
// data breakpoints report coprocessor number 0 in CAUSE.CE.
ALWAYS_INLINE_RELEASE void CPU::DispatchCop0Breakpoint(bool data)
{
  // When a breakpoint address match occurs the PSX jumps to 80000040h (ie. unlike normal exceptions, not to 80000080h).
  // The Excode value in the CAUSE register is set to 09h (same as BREAK opcode), and EPC contains the return address,
  // as usually. One of the first things to be done in the exception handler is to disable breakpoints (eg. if the
  // any-jump break is enabled, then it must be disabled BEFORE jumping from 80000040h to the actual exception handler).
  RaiseException(Cop0Registers::CAUSE::MakeValueForException(
                   Exception::BP, g_state.current_instruction_in_branch_delay_slot,
                   g_state.current_instruction_was_branch_taken, data ? 0 : g_state.current_instruction.cop.cop_n),
                 g_state.current_instruction_pc, GetExceptionVector(true));
}
402
403
// Raises an exception through the standard (non-debug) vector.
void CPU::RaiseException(u32 CAUSE_bits, u32 EPC)
{
  const u32 vector = GetExceptionVector();
  RaiseException(CAUSE_bits, EPC, vector);
}
407
408
// Raises an exception of type 'excode' at the currently-executing
// instruction, filling in the branch-delay-slot flags from current state.
void CPU::RaiseException(Exception excode)
{
  RaiseException(Cop0Registers::CAUSE::MakeValueForException(excode, g_state.current_instruction_in_branch_delay_slot,
                                                             g_state.current_instruction_was_branch_taken,
                                                             g_state.current_instruction.cop.cop_n),
                 g_state.current_instruction_pc, GetExceptionVector());
}
415
416
// Handles a BREAK instruction. When PCDrv (host file access) is enabled, the
// break may be serviced as an HLE syscall and skipped entirely; otherwise a
// normal BP exception is raised.
void CPU::RaiseBreakException(u32 CAUSE_bits, u32 EPC, u32 instruction_bits)
{
  if (g_settings.pcdrv_enable)
  {
    // Load delays need to be flushed, because the break HLE might read a register which
    // is currently being loaded, and on real hardware there isn't a hazard here.
    FlushLoadDelay();

    if (PCDrv::HandleSyscall(instruction_bits, g_state.regs))
    {
      // immediately return
      g_state.npc = EPC + 4;
      FlushPipeline();
      return;
    }
  }
  else
  {
    WARNING_LOG("PCDrv is not enabled, break HLE will not be executed.");
  }

  // normal exception
  RaiseException(CAUSE_bits, EPC, GetExceptionVector());
}
440
441
// Asserts or deasserts the external interrupt request line (bit 10 of CAUSE),
// checking for a pending interrupt only when the bit transitions to set.
void CPU::SetIRQRequest(bool state)
{
  // Only uses bit 10.
  constexpr u32 bit = (1u << 10);
  const u32 old_cause = g_state.cop0_regs.cause.bits;
  g_state.cop0_regs.cause.bits = (g_state.cop0_regs.cause.bits & ~bit) | (state ? bit : 0u);

  // (old ^ new) is nonzero only when the bit actually changed.
  if (old_cause ^ g_state.cop0_regs.cause.bits && state)
    CheckForPendingInterrupt();
}
450
451
// Advances the load-delay pipeline by one instruction: commits the pending
// delayed load and promotes the next-slot load into the pending slot.
ALWAYS_INLINE_RELEASE void CPU::UpdateLoadDelay()
{
  // the old value is needed in case the delay slot instruction overwrites the same register
  g_state.regs.r[static_cast<u8>(g_state.load_delay_reg)] = g_state.load_delay_value;
  g_state.load_delay_reg = g_state.next_load_delay_reg;
  g_state.load_delay_value = g_state.next_load_delay_value;
  g_state.next_load_delay_reg = Reg::count;
}
459
460
// Commits any pending delayed load immediately and empties both delay slots
// (Reg::count acts as the "no pending load" sentinel).
ALWAYS_INLINE_RELEASE void CPU::FlushLoadDelay()
{
  g_state.next_load_delay_reg = Reg::count;
  g_state.regs.r[static_cast<u8>(g_state.load_delay_reg)] = g_state.load_delay_value;
  g_state.load_delay_reg = Reg::count;
}
466
467
// Resets the two-stage fetch pipeline after a redirect (SetPC/exception):
// commits pending loads, clears branch-delay tracking, and refetches from npc.
ALWAYS_INLINE_RELEASE void CPU::FlushPipeline()
{
  // loads are flushed
  FlushLoadDelay();

  // not in a branch delay slot
  g_state.branch_was_taken = false;
  g_state.next_instruction_is_branch_delay_slot = false;
  g_state.current_instruction_pc = g_state.pc;

  // prefetch the next instruction
  FetchInstruction();

  // and set it as the next one to execute
  g_state.current_instruction.bits = g_state.next_instruction.bits;
  g_state.current_instruction_in_branch_delay_slot = false;
  g_state.current_instruction_was_branch_taken = false;
}
485
486
// Reads the current value of general-purpose register 'rs'.
ALWAYS_INLINE u32 CPU::ReadReg(Reg rs)
{
  const u8 index = static_cast<u8>(rs);
  return g_state.regs.r[index];
}
490
491
// Writes a GPR immediately, cancelling any pending delayed load that targets
// the same register (the direct write wins over the in-flight load).
ALWAYS_INLINE void CPU::WriteReg(Reg rd, u32 value)
{
  g_state.regs.r[static_cast<u8>(rd)] = value;
  g_state.load_delay_reg = (rd == g_state.load_delay_reg) ? Reg::count : g_state.load_delay_reg;

  // prevent writes to $zero from going through - better than branching/cmov
  g_state.regs.zero = 0;
}
499
500
// Schedules a delayed register write that becomes visible after the next
// instruction (load-delay-slot semantics). Writes to $zero are discarded.
ALWAYS_INLINE_RELEASE void CPU::WriteRegDelayed(Reg rd, u32 value)
{
  DebugAssert(g_state.next_load_delay_reg == Reg::count);
  if (rd == Reg::zero)
    return;

  // double load delays ignore the first value
  if (g_state.load_delay_reg == rd)
    g_state.load_delay_reg = Reg::count;

  // save the old value, if something else overwrites this reg we want to preserve it
  g_state.next_load_delay_reg = rd;
  g_state.next_load_delay_value = value;
}
514
515
// Returns true if the COP0 BPC/BPCM execution breakpoint could match any
// address inside a code-carrying region (RAM or BIOS in KUSEG/KSEG0/KSEG1).
ALWAYS_INLINE_RELEASE bool CPU::IsCop0ExecutionBreakpointUnmasked()
{
  static constexpr const u32 code_address_ranges[][2] = {
    // KUSEG
    {Bus::RAM_BASE, Bus::RAM_BASE | Bus::RAM_8MB_MASK},
    {Bus::BIOS_BASE, Bus::BIOS_BASE | Bus::BIOS_MASK},

    // KSEG0
    {0x80000000u | Bus::RAM_BASE, 0x80000000u | Bus::RAM_BASE | Bus::RAM_8MB_MASK},
    {0x80000000u | Bus::BIOS_BASE, 0x80000000u | Bus::BIOS_BASE | Bus::BIOS_MASK},

    // KSEG1
    {0xA0000000u | Bus::RAM_BASE, 0xA0000000u | Bus::RAM_BASE | Bus::RAM_8MB_MASK},
    {0xA0000000u | Bus::BIOS_BASE, 0xA0000000u | Bus::BIOS_BASE | Bus::BIOS_MASK},
  };

  const u32 bpc = g_state.cop0_regs.BPC;
  const u32 bpcm = g_state.cop0_regs.BPCM;

  // Compare under the mask: if the masked BPC falls within a region's masked
  // bounds, some executable address can trigger the breakpoint.
  const u32 masked_bpc = bpc & bpcm;
  for (const auto& [range_start, range_end] : code_address_ranges)
  {
    if (masked_bpc >= (range_start & bpcm) && masked_bpc <= (range_end & bpcm))
      return true;
  }

  return false;
}
542
543
// Checks the COP0 BPC/BPCM execution breakpoint against 'pc'. On a hit, sets
// the DCIC status bits and raises the breakpoint exception; returns true.
ALWAYS_INLINE_RELEASE bool CPU::Cop0ExecutionBreakpointCheck(u32 pc)
{
  if (!g_state.cop0_regs.dcic.ExecutionBreakpointsEnabled())
    return false;

  const u32 bpc = g_state.cop0_regs.BPC;
  const u32 bpcm = g_state.cop0_regs.BPCM;

  // Break condition is "((PC XOR BPC) AND BPCM)=0".
  if (bpcm == 0 || ((pc ^ bpc) & bpcm) != 0u)
    return false;

  DEV_LOG("Cop0 execution breakpoint at {:08X}", pc);
  g_state.cop0_regs.dcic.status_any_break = true;
  g_state.cop0_regs.dcic.status_bpc_code_break = true;
  DispatchCop0Breakpoint(false);
  return true;
}
561
562
// Checks the COP0 BDA/BDAM data breakpoint against a memory access address.
// The access direction ('type') selects which enable and status bits apply.
// On a hit, sets the DCIC status bits and raises the breakpoint exception.
template<MemoryAccessType type>
ALWAYS_INLINE_RELEASE void CPU::Cop0DataBreakpointCheck(VirtualMemoryAddress address)
{
  if constexpr (type == MemoryAccessType::Read)
  {
    if (!g_state.cop0_regs.dcic.DataReadBreakpointsEnabled())
      return;
  }
  else
  {
    if (!g_state.cop0_regs.dcic.DataWriteBreakpointsEnabled())
      return;
  }

  // Break condition is "((addr XOR BDA) AND BDAM)=0".
  const u32 bda = g_state.cop0_regs.BDA;
  const u32 bdam = g_state.cop0_regs.BDAM;
  if (bdam == 0 || ((address ^ bda) & bdam) != 0u)
    return;

  DEV_LOG("Cop0 data breakpoint for {:08X} at {:08X}", address, g_state.current_instruction_pc);

  g_state.cop0_regs.dcic.status_any_break = true;
  g_state.cop0_regs.dcic.status_bda_data_break = true;
  if constexpr (type == MemoryAccessType::Read)
    g_state.cop0_regs.dcic.status_bda_data_read_break = true;
  else
    g_state.cop0_regs.dcic.status_bda_data_write_break = true;

  DispatchCop0Breakpoint(true);
}
593
594
#ifdef _DEBUG
595
596
// Debug-build helper: disassembles the currently-executing instruction and
// prints it (with its comment, padded to column 30) to stdout.
void CPU::TracePrintInstruction()
{
  const u32 pc = g_state.current_instruction_pc;
  const u32 bits = g_state.current_instruction.bits;

  TinyString instr;
  TinyString comment;
  DisassembleInstruction(&instr, pc, bits);
  DisassembleInstructionComment(&comment, pc, bits);
  if (!comment.empty())
  {
    // Pad the disassembly so comments line up across rows.
    for (u32 i = instr.length(); i < 30; i++)
      instr.append(' ');
    instr.append("; ");
    instr.append(comment);
  }

  std::printf("%08x: %08x %s\n", pc, bits, instr.c_str());
}
615
616
#endif
617
618
// Disassembles 'bits' at 'pc' and writes it to the dev log, optionally with a
// register-value comment ('regs') and a caller-supplied line prefix.
void CPU::PrintInstruction(u32 bits, u32 pc, bool regs, const char* prefix)
{
  TinyString instr;
  DisassembleInstruction(&instr, pc, bits);
  if (regs)
  {
    TinyString comment;
    DisassembleInstructionComment(&comment, pc, bits);
    if (!comment.empty())
    {
      // Pad the disassembly so comments line up across rows.
      for (u32 i = instr.length(); i < 30; i++)
        instr.append(' ');
      instr.append("; ");
      instr.append(comment);
    }
  }

  DEV_LOG("{}{:08x}: {:08x} {}", prefix, pc, bits, instr);
}
637
638
// Disassembles 'bits' at 'pc' and appends it to the execution log file,
// optionally with a register-value comment ('regs').
void CPU::LogInstruction(u32 bits, u32 pc, bool regs)
{
  TinyString instr;
  DisassembleInstruction(&instr, pc, bits);
  if (regs)
  {
    TinyString comment;
    DisassembleInstructionComment(&comment, pc, bits);
    if (!comment.empty())
    {
      // Pad the disassembly so comments line up across rows.
      for (u32 i = instr.length(); i < 30; i++)
        instr.append(' ');
      instr.append("; ");
      instr.append(comment);
    }
  }

  WriteToExecutionLog("%08x: %08x %s\n", pc, bits, instr.c_str());
}
657
658
// HLE for the BIOS write() call: forwards up to a2 bytes from the buffer at
// a1 to the TTY, but only for fd 1 (stdout). Stops early on an unreadable
// byte or a NUL.
void CPU::HandleWriteSyscall()
{
  const auto& regs = g_state.regs;
  if (regs.a0 != 1) // stdout
    return;

  u32 addr = regs.a1;
  const u32 count = regs.a2;
  for (u32 i = 0; i < count; i++)
  {
    u8 value;
    if (!SafeReadMemoryByte(addr++, &value) || value == 0)
      break;

    Bus::AddTTYCharacter(static_cast<char>(value));
  }
}
675
676
// HLE for the BIOS putc()/putchar() calls: forwards the character in a0 to
// the TTY, ignoring NUL.
void CPU::HandlePutcSyscall()
{
  const auto& regs = g_state.regs;
  if (regs.a0 != 0)
    Bus::AddTTYCharacter(static_cast<char>(regs.a0));
}
682
683
// HLE for the BIOS puts() call: forwards the NUL-terminated string at a0 to
// the TTY, capped at 1024 characters to bound the scan of guest memory.
void CPU::HandlePutsSyscall()
{
  const auto& regs = g_state.regs;

  u32 addr = regs.a0;
  for (u32 i = 0; i < 1024; i++)
  {
    u8 value;
    if (!SafeReadMemoryByte(addr++, &value) || value == 0)
      break;

    Bus::AddTTYCharacter(static_cast<char>(value));
  }
}
697
698
void CPU::HandleA0Syscall()
699
{
700
const auto& regs = g_state.regs;
701
const u32 call = regs.t1;
702
if (call == 0x03)
703
HandleWriteSyscall();
704
else if (call == 0x09 || call == 0x3c)
705
HandlePutcSyscall();
706
else if (call == 0x3e)
707
HandlePutsSyscall();
708
}
709
710
void CPU::HandleB0Syscall()
711
{
712
const auto& regs = g_state.regs;
713
const u32 call = regs.t1;
714
if (call == 0x35)
715
HandleWriteSyscall();
716
else if (call == 0x3b || call == 0x3d)
717
HandlePutcSyscall();
718
else if (call == 0x3f)
719
HandlePutsSyscall();
720
}
721
722
// Name/pointer pairs for every register exposed to the debugger UI: the GPRs
// (plus hi/lo and pc), a handful of COP0 registers, then the GTE register
// file addressed by 32-bit index.
const std::array<CPU::DebuggerRegisterListEntry, CPU::NUM_DEBUGGER_REGISTER_LIST_ENTRIES>
  CPU::g_debugger_register_list = {{{"zero", &CPU::g_state.regs.zero},
                                    {"at", &CPU::g_state.regs.at},
                                    {"v0", &CPU::g_state.regs.v0},
                                    {"v1", &CPU::g_state.regs.v1},
                                    {"a0", &CPU::g_state.regs.a0},
                                    {"a1", &CPU::g_state.regs.a1},
                                    {"a2", &CPU::g_state.regs.a2},
                                    {"a3", &CPU::g_state.regs.a3},
                                    {"t0", &CPU::g_state.regs.t0},
                                    {"t1", &CPU::g_state.regs.t1},
                                    {"t2", &CPU::g_state.regs.t2},
                                    {"t3", &CPU::g_state.regs.t3},
                                    {"t4", &CPU::g_state.regs.t4},
                                    {"t5", &CPU::g_state.regs.t5},
                                    {"t6", &CPU::g_state.regs.t6},
                                    {"t7", &CPU::g_state.regs.t7},
                                    {"s0", &CPU::g_state.regs.s0},
                                    {"s1", &CPU::g_state.regs.s1},
                                    {"s2", &CPU::g_state.regs.s2},
                                    {"s3", &CPU::g_state.regs.s3},
                                    {"s4", &CPU::g_state.regs.s4},
                                    {"s5", &CPU::g_state.regs.s5},
                                    {"s6", &CPU::g_state.regs.s6},
                                    {"s7", &CPU::g_state.regs.s7},
                                    {"t8", &CPU::g_state.regs.t8},
                                    {"t9", &CPU::g_state.regs.t9},
                                    {"k0", &CPU::g_state.regs.k0},
                                    {"k1", &CPU::g_state.regs.k1},
                                    {"gp", &CPU::g_state.regs.gp},
                                    {"sp", &CPU::g_state.regs.sp},
                                    {"fp", &CPU::g_state.regs.fp},
                                    {"ra", &CPU::g_state.regs.ra},
                                    {"hi", &CPU::g_state.regs.hi},
                                    {"lo", &CPU::g_state.regs.lo},
                                    {"pc", &CPU::g_state.pc},

                                    {"COP0_SR", &CPU::g_state.cop0_regs.sr.bits},
                                    {"COP0_CAUSE", &CPU::g_state.cop0_regs.cause.bits},
                                    {"COP0_EPC", &CPU::g_state.cop0_regs.EPC},
                                    {"COP0_BadVAddr", &CPU::g_state.cop0_regs.BadVaddr},

                                    {"V0_XY", &CPU::g_state.gte_regs.r32[0]},
                                    {"V0_Z", &CPU::g_state.gte_regs.r32[1]},
                                    {"V1_XY", &CPU::g_state.gte_regs.r32[2]},
                                    {"V1_Z", &CPU::g_state.gte_regs.r32[3]},
                                    {"V2_XY", &CPU::g_state.gte_regs.r32[4]},
                                    {"V2_Z", &CPU::g_state.gte_regs.r32[5]},
                                    {"RGBC", &CPU::g_state.gte_regs.r32[6]},
                                    {"OTZ", &CPU::g_state.gte_regs.r32[7]},
                                    {"IR0", &CPU::g_state.gte_regs.r32[8]},
                                    {"IR1", &CPU::g_state.gte_regs.r32[9]},
                                    {"IR2", &CPU::g_state.gte_regs.r32[10]},
                                    {"IR3", &CPU::g_state.gte_regs.r32[11]},
                                    {"SXY0", &CPU::g_state.gte_regs.r32[12]},
                                    {"SXY1", &CPU::g_state.gte_regs.r32[13]},
                                    {"SXY2", &CPU::g_state.gte_regs.r32[14]},
                                    {"SXYP", &CPU::g_state.gte_regs.r32[15]},
                                    {"SZ0", &CPU::g_state.gte_regs.r32[16]},
                                    {"SZ1", &CPU::g_state.gte_regs.r32[17]},
                                    {"SZ2", &CPU::g_state.gte_regs.r32[18]},
                                    {"SZ3", &CPU::g_state.gte_regs.r32[19]},
                                    {"RGB0", &CPU::g_state.gte_regs.r32[20]},
                                    {"RGB1", &CPU::g_state.gte_regs.r32[21]},
                                    {"RGB2", &CPU::g_state.gte_regs.r32[22]},
                                    {"RES1", &CPU::g_state.gte_regs.r32[23]},
                                    {"MAC0", &CPU::g_state.gte_regs.r32[24]},
                                    {"MAC1", &CPU::g_state.gte_regs.r32[25]},
                                    {"MAC2", &CPU::g_state.gte_regs.r32[26]},
                                    {"MAC3", &CPU::g_state.gte_regs.r32[27]},
                                    {"IRGB", &CPU::g_state.gte_regs.r32[28]},
                                    {"ORGB", &CPU::g_state.gte_regs.r32[29]},
                                    {"LZCS", &CPU::g_state.gte_regs.r32[30]},
                                    {"LZCR", &CPU::g_state.gte_regs.r32[31]},
                                    {"RT_0", &CPU::g_state.gte_regs.r32[32]},
                                    {"RT_1", &CPU::g_state.gte_regs.r32[33]},
                                    {"RT_2", &CPU::g_state.gte_regs.r32[34]},
                                    {"RT_3", &CPU::g_state.gte_regs.r32[35]},
                                    {"RT_4", &CPU::g_state.gte_regs.r32[36]},
                                    {"TRX", &CPU::g_state.gte_regs.r32[37]},
                                    {"TRY", &CPU::g_state.gte_regs.r32[38]},
                                    {"TRZ", &CPU::g_state.gte_regs.r32[39]},
                                    {"LLM_0", &CPU::g_state.gte_regs.r32[40]},
                                    {"LLM_1", &CPU::g_state.gte_regs.r32[41]},
                                    {"LLM_2", &CPU::g_state.gte_regs.r32[42]},
                                    {"LLM_3", &CPU::g_state.gte_regs.r32[43]},
                                    {"LLM_4", &CPU::g_state.gte_regs.r32[44]},
                                    {"RBK", &CPU::g_state.gte_regs.r32[45]},
                                    {"GBK", &CPU::g_state.gte_regs.r32[46]},
                                    {"BBK", &CPU::g_state.gte_regs.r32[47]},
                                    {"LCM_0", &CPU::g_state.gte_regs.r32[48]},
                                    {"LCM_1", &CPU::g_state.gte_regs.r32[49]},
                                    {"LCM_2", &CPU::g_state.gte_regs.r32[50]},
                                    {"LCM_3", &CPU::g_state.gte_regs.r32[51]},
                                    {"LCM_4", &CPU::g_state.gte_regs.r32[52]},
                                    {"RFC", &CPU::g_state.gte_regs.r32[53]},
                                    {"GFC", &CPU::g_state.gte_regs.r32[54]},
                                    {"BFC", &CPU::g_state.gte_regs.r32[55]},
                                    {"OFX", &CPU::g_state.gte_regs.r32[56]},
                                    {"OFY", &CPU::g_state.gte_regs.r32[57]},
                                    {"H", &CPU::g_state.gte_regs.r32[58]},
                                    {"DQA", &CPU::g_state.gte_regs.r32[59]},
                                    {"DQB", &CPU::g_state.gte_regs.r32[60]},
                                    {"ZSF3", &CPU::g_state.gte_regs.r32[61]},
                                    {"ZSF4", &CPU::g_state.gte_regs.r32[62]},
                                    {"FLAG", &CPU::g_state.gte_regs.r32[63]}}};
828
829
// Signed 32-bit addition with overflow detection. Returns true when the
// signed sum overflows; *new_value always receives the wrapped result.
ALWAYS_INLINE static bool AddOverflow(u32 old_value, u32 add_value, u32* new_value)
{
#if defined(__clang__) || defined(__GNUC__)
  return __builtin_add_overflow(static_cast<s32>(old_value), static_cast<s32>(add_value),
                                reinterpret_cast<s32*>(new_value));
#else
  const u32 sum = old_value + add_value;
  *new_value = sum;

  // Overflow occurred iff both operands share a sign that differs from the result's.
  return (((sum ^ old_value) & (sum ^ add_value)) & UINT32_C(0x80000000)) != 0;
#endif
}
839
840
// Signed 32-bit subtraction with overflow detection. Returns true when the
// signed difference overflows; *new_value always receives the wrapped result.
ALWAYS_INLINE static bool SubOverflow(u32 old_value, u32 sub_value, u32* new_value)
{
#if defined(__clang__) || defined(__GNUC__)
  return __builtin_sub_overflow(static_cast<s32>(old_value), static_cast<s32>(sub_value),
                                reinterpret_cast<s32*>(new_value));
#else
  const u32 diff = old_value - sub_value;
  *new_value = diff;

  // Overflow occurred iff the operands' signs differ and the result's sign
  // differs from the minuend's.
  return (((diff ^ old_value) & (old_value ^ sub_value)) & UINT32_C(0x80000000)) != 0;
#endif
}
850
851
// Reads the word at 'addr' (0 if unreadable) and logs its disassembly.
void CPU::DisassembleAndPrint(u32 addr, bool regs, const char* prefix)
{
  u32 bits = 0;
  SafeReadMemoryWord(addr, &bits);
  PrintInstruction(bits, addr, regs, prefix);
}
857
858
// Logs a disassembly window around 'addr': 'instructions_before' lines before
// it and 'instructions_after' lines after, with the instruction at 'addr'
// itself marked by a "---->" prefix and annotated with register values.
void CPU::DisassembleAndPrint(u32 addr, u32 instructions_before /* = 0 */, u32 instructions_after /* = 0 */)
{
  u32 disasm_addr = addr - (instructions_before * sizeof(u32));
  for (u32 i = 0; i < instructions_before; i++)
  {
    DisassembleAndPrint(disasm_addr, false, "");
    disasm_addr += sizeof(u32);
  }

  // <= to include the instruction itself
  for (u32 i = 0; i <= instructions_after; i++)
  {
    DisassembleAndPrint(disasm_addr, (i == 0), (i == 0) ? "---->" : "");
    disasm_addr += sizeof(u32);
  }
}
874
875
template<PGXPMode pgxp_mode, bool debug>
876
ALWAYS_INLINE_RELEASE void CPU::ExecuteInstruction()
877
{
878
restart_instruction:
879
const Instruction inst = g_state.current_instruction;
880
881
#if 0
882
if (g_state.current_instruction_pc == 0x80030000)
883
{
884
TRACE_EXECUTION = true;
885
__debugbreak();
886
}
887
#endif
888
889
#ifdef _DEBUG
890
if (TRACE_EXECUTION)
891
TracePrintInstruction();
892
#endif
893
894
// Skip nops. Makes PGXP-CPU quicker, but also the regular interpreter.
895
if (inst.bits == 0)
896
return;
897
898
switch (inst.op)
899
{
900
case InstructionOp::funct:
901
{
902
switch (inst.r.funct)
903
{
904
case InstructionFunct::sll:
905
{
906
const u32 rtVal = ReadReg(inst.r.rt);
907
const u32 rdVal = rtVal << inst.r.shamt;
908
WriteReg(inst.r.rd, rdVal);
909
910
if constexpr (pgxp_mode >= PGXPMode::CPU)
911
PGXP::CPU_SLL(inst, rtVal);
912
}
913
break;
914
915
case InstructionFunct::srl:
916
{
917
const u32 rtVal = ReadReg(inst.r.rt);
918
const u32 rdVal = rtVal >> inst.r.shamt;
919
WriteReg(inst.r.rd, rdVal);
920
921
if constexpr (pgxp_mode >= PGXPMode::CPU)
922
PGXP::CPU_SRL(inst, rtVal);
923
}
924
break;
925
926
case InstructionFunct::sra:
927
{
928
const u32 rtVal = ReadReg(inst.r.rt);
929
const u32 rdVal = static_cast<u32>(static_cast<s32>(rtVal) >> inst.r.shamt);
930
WriteReg(inst.r.rd, rdVal);
931
932
if constexpr (pgxp_mode >= PGXPMode::CPU)
933
PGXP::CPU_SRA(inst, rtVal);
934
}
935
break;
936
937
case InstructionFunct::sllv:
938
{
939
const u32 rtVal = ReadReg(inst.r.rt);
940
const u32 shamt = ReadReg(inst.r.rs) & UINT32_C(0x1F);
941
const u32 rdVal = rtVal << shamt;
942
if constexpr (pgxp_mode >= PGXPMode::CPU)
943
PGXP::CPU_SLLV(inst, rtVal, shamt);
944
945
WriteReg(inst.r.rd, rdVal);
946
}
947
break;
948
949
case InstructionFunct::srlv:
950
{
951
const u32 rtVal = ReadReg(inst.r.rt);
952
const u32 shamt = ReadReg(inst.r.rs) & UINT32_C(0x1F);
953
const u32 rdVal = rtVal >> shamt;
954
WriteReg(inst.r.rd, rdVal);
955
956
if constexpr (pgxp_mode >= PGXPMode::CPU)
957
PGXP::CPU_SRLV(inst, rtVal, shamt);
958
}
959
break;
960
961
case InstructionFunct::srav:
962
{
963
const u32 rtVal = ReadReg(inst.r.rt);
964
const u32 shamt = ReadReg(inst.r.rs) & UINT32_C(0x1F);
965
const u32 rdVal = static_cast<u32>(static_cast<s32>(rtVal) >> shamt);
966
WriteReg(inst.r.rd, rdVal);
967
968
if constexpr (pgxp_mode >= PGXPMode::CPU)
969
PGXP::CPU_SRAV(inst, rtVal, shamt);
970
}
971
break;
972
973
case InstructionFunct::and_:
974
{
975
const u32 rsVal = ReadReg(inst.r.rs);
976
const u32 rtVal = ReadReg(inst.r.rt);
977
const u32 new_value = rsVal & rtVal;
978
WriteReg(inst.r.rd, new_value);
979
980
if constexpr (pgxp_mode >= PGXPMode::CPU)
981
PGXP::CPU_AND_(inst, rsVal, rtVal);
982
}
983
break;
984
985
case InstructionFunct::or_:
986
{
987
const u32 rsVal = ReadReg(inst.r.rs);
988
const u32 rtVal = ReadReg(inst.r.rt);
989
const u32 new_value = rsVal | rtVal;
990
WriteReg(inst.r.rd, new_value);
991
992
if constexpr (pgxp_mode >= PGXPMode::CPU)
993
PGXP::CPU_OR_(inst, rsVal, rtVal);
994
else if constexpr (pgxp_mode >= PGXPMode::Memory)
995
PGXP::TryMove(inst.r.rd, inst.r.rs, inst.r.rt);
996
}
997
break;
998
999
case InstructionFunct::xor_:
1000
{
1001
const u32 rsVal = ReadReg(inst.r.rs);
1002
const u32 rtVal = ReadReg(inst.r.rt);
1003
const u32 new_value = rsVal ^ rtVal;
1004
WriteReg(inst.r.rd, new_value);
1005
1006
if constexpr (pgxp_mode >= PGXPMode::CPU)
1007
PGXP::CPU_XOR_(inst, rsVal, rtVal);
1008
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1009
PGXP::TryMove(inst.r.rd, inst.r.rs, inst.r.rt);
1010
}
1011
break;
1012
1013
case InstructionFunct::nor:
1014
{
1015
const u32 rsVal = ReadReg(inst.r.rs);
1016
const u32 rtVal = ReadReg(inst.r.rt);
1017
const u32 new_value = ~(rsVal | rtVal);
1018
WriteReg(inst.r.rd, new_value);
1019
1020
if constexpr (pgxp_mode >= PGXPMode::CPU)
1021
PGXP::CPU_NOR(inst, rsVal, rtVal);
1022
}
1023
break;
1024
1025
case InstructionFunct::add:
1026
{
1027
const u32 rsVal = ReadReg(inst.r.rs);
1028
const u32 rtVal = ReadReg(inst.r.rt);
1029
u32 rdVal;
1030
if (AddOverflow(rsVal, rtVal, &rdVal)) [[unlikely]]
1031
{
1032
RaiseException(Exception::Ov);
1033
return;
1034
}
1035
1036
WriteReg(inst.r.rd, rdVal);
1037
1038
if constexpr (pgxp_mode == PGXPMode::CPU)
1039
PGXP::CPU_ADD(inst, rsVal, rtVal);
1040
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1041
PGXP::TryMove(inst.r.rd, inst.r.rs, inst.r.rt);
1042
}
1043
break;
1044
1045
case InstructionFunct::addu:
1046
{
1047
const u32 rsVal = ReadReg(inst.r.rs);
1048
const u32 rtVal = ReadReg(inst.r.rt);
1049
const u32 rdVal = rsVal + rtVal;
1050
WriteReg(inst.r.rd, rdVal);
1051
1052
if constexpr (pgxp_mode >= PGXPMode::CPU)
1053
PGXP::CPU_ADD(inst, rsVal, rtVal);
1054
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1055
PGXP::TryMove(inst.r.rd, inst.r.rs, inst.r.rt);
1056
}
1057
break;
1058
1059
case InstructionFunct::sub:
1060
{
1061
const u32 rsVal = ReadReg(inst.r.rs);
1062
const u32 rtVal = ReadReg(inst.r.rt);
1063
u32 rdVal;
1064
if (SubOverflow(rsVal, rtVal, &rdVal)) [[unlikely]]
1065
{
1066
RaiseException(Exception::Ov);
1067
return;
1068
}
1069
1070
WriteReg(inst.r.rd, rdVal);
1071
1072
if constexpr (pgxp_mode >= PGXPMode::CPU)
1073
PGXP::CPU_SUB(inst, rsVal, rtVal);
1074
}
1075
break;
1076
1077
case InstructionFunct::subu:
1078
{
1079
const u32 rsVal = ReadReg(inst.r.rs);
1080
const u32 rtVal = ReadReg(inst.r.rt);
1081
const u32 rdVal = rsVal - rtVal;
1082
WriteReg(inst.r.rd, rdVal);
1083
1084
if constexpr (pgxp_mode >= PGXPMode::CPU)
1085
PGXP::CPU_SUB(inst, rsVal, rtVal);
1086
}
1087
break;
1088
1089
case InstructionFunct::slt:
1090
{
1091
const u32 rsVal = ReadReg(inst.r.rs);
1092
const u32 rtVal = ReadReg(inst.r.rt);
1093
const u32 result = BoolToUInt32(static_cast<s32>(rsVal) < static_cast<s32>(rtVal));
1094
WriteReg(inst.r.rd, result);
1095
1096
if constexpr (pgxp_mode >= PGXPMode::CPU)
1097
PGXP::CPU_SLT(inst, rsVal, rtVal);
1098
}
1099
break;
1100
1101
case InstructionFunct::sltu:
1102
{
1103
const u32 rsVal = ReadReg(inst.r.rs);
1104
const u32 rtVal = ReadReg(inst.r.rt);
1105
const u32 result = BoolToUInt32(rsVal < rtVal);
1106
WriteReg(inst.r.rd, result);
1107
1108
if constexpr (pgxp_mode >= PGXPMode::CPU)
1109
PGXP::CPU_SLTU(inst, rsVal, rtVal);
1110
}
1111
break;
1112
1113
case InstructionFunct::mfhi:
1114
{
1115
const u32 value = g_state.regs.hi;
1116
WriteReg(inst.r.rd, value);
1117
1118
StallUntilMulDivComplete();
1119
1120
if constexpr (pgxp_mode >= PGXPMode::CPU)
1121
PGXP::CPU_MOVE(static_cast<u32>(inst.r.rd.GetValue()), static_cast<u32>(Reg::hi), value);
1122
}
1123
break;
1124
1125
case InstructionFunct::mthi:
1126
{
1127
const u32 value = ReadReg(inst.r.rs);
1128
g_state.regs.hi = value;
1129
1130
StallUntilMulDivComplete();
1131
1132
if constexpr (pgxp_mode >= PGXPMode::CPU)
1133
PGXP::CPU_MOVE(static_cast<u32>(Reg::hi), static_cast<u32>(inst.r.rs.GetValue()), value);
1134
}
1135
break;
1136
1137
case InstructionFunct::mflo:
1138
{
1139
const u32 value = g_state.regs.lo;
1140
WriteReg(inst.r.rd, value);
1141
1142
StallUntilMulDivComplete();
1143
1144
if constexpr (pgxp_mode >= PGXPMode::CPU)
1145
PGXP::CPU_MOVE(static_cast<u32>(inst.r.rd.GetValue()), static_cast<u32>(Reg::lo), value);
1146
}
1147
break;
1148
1149
case InstructionFunct::mtlo:
1150
{
1151
const u32 value = ReadReg(inst.r.rs);
1152
g_state.regs.lo = value;
1153
1154
StallUntilMulDivComplete();
1155
1156
if constexpr (pgxp_mode == PGXPMode::CPU)
1157
PGXP::CPU_MOVE(static_cast<u32>(Reg::lo), static_cast<u32>(inst.r.rs.GetValue()), value);
1158
}
1159
break;
1160
1161
case InstructionFunct::mult:
1162
{
1163
const u32 lhs = ReadReg(inst.r.rs);
1164
const u32 rhs = ReadReg(inst.r.rt);
1165
const u64 result =
1166
static_cast<u64>(static_cast<s64>(SignExtend64(lhs)) * static_cast<s64>(SignExtend64(rhs)));
1167
1168
g_state.regs.hi = Truncate32(result >> 32);
1169
g_state.regs.lo = Truncate32(result);
1170
1171
StallUntilMulDivComplete();
1172
AddMulDivTicks(GetMultTicks(static_cast<s32>(lhs)));
1173
1174
if constexpr (pgxp_mode >= PGXPMode::CPU)
1175
PGXP::CPU_MULT(inst, lhs, rhs);
1176
}
1177
break;
1178
1179
case InstructionFunct::multu:
1180
{
1181
const u32 lhs = ReadReg(inst.r.rs);
1182
const u32 rhs = ReadReg(inst.r.rt);
1183
const u64 result = ZeroExtend64(lhs) * ZeroExtend64(rhs);
1184
1185
g_state.regs.hi = Truncate32(result >> 32);
1186
g_state.regs.lo = Truncate32(result);
1187
1188
StallUntilMulDivComplete();
1189
AddMulDivTicks(GetMultTicks(lhs));
1190
1191
if constexpr (pgxp_mode >= PGXPMode::CPU)
1192
PGXP::CPU_MULTU(inst, lhs, rhs);
1193
}
1194
break;
1195
1196
case InstructionFunct::div:
1197
{
1198
const s32 num = static_cast<s32>(ReadReg(inst.r.rs));
1199
const s32 denom = static_cast<s32>(ReadReg(inst.r.rt));
1200
1201
if (denom == 0)
1202
{
1203
// divide by zero
1204
g_state.regs.lo = (num >= 0) ? UINT32_C(0xFFFFFFFF) : UINT32_C(1);
1205
g_state.regs.hi = static_cast<u32>(num);
1206
}
1207
else if (static_cast<u32>(num) == UINT32_C(0x80000000) && denom == -1)
1208
{
1209
// unrepresentable
1210
g_state.regs.lo = UINT32_C(0x80000000);
1211
g_state.regs.hi = 0;
1212
}
1213
else
1214
{
1215
g_state.regs.lo = static_cast<u32>(num / denom);
1216
g_state.regs.hi = static_cast<u32>(num % denom);
1217
}
1218
1219
StallUntilMulDivComplete();
1220
AddMulDivTicks(GetDivTicks());
1221
1222
if constexpr (pgxp_mode >= PGXPMode::CPU)
1223
PGXP::CPU_DIV(inst, num, denom);
1224
}
1225
break;
1226
1227
case InstructionFunct::divu:
1228
{
1229
const u32 num = ReadReg(inst.r.rs);
1230
const u32 denom = ReadReg(inst.r.rt);
1231
1232
if (denom == 0)
1233
{
1234
// divide by zero
1235
g_state.regs.lo = UINT32_C(0xFFFFFFFF);
1236
g_state.regs.hi = static_cast<u32>(num);
1237
}
1238
else
1239
{
1240
g_state.regs.lo = num / denom;
1241
g_state.regs.hi = num % denom;
1242
}
1243
1244
StallUntilMulDivComplete();
1245
AddMulDivTicks(GetDivTicks());
1246
1247
if constexpr (pgxp_mode >= PGXPMode::CPU)
1248
PGXP::CPU_DIVU(inst, num, denom);
1249
}
1250
break;
1251
1252
case InstructionFunct::jr:
1253
{
1254
g_state.next_instruction_is_branch_delay_slot = true;
1255
const u32 target = ReadReg(inst.r.rs);
1256
Branch(target);
1257
}
1258
break;
1259
1260
case InstructionFunct::jalr:
1261
{
1262
g_state.next_instruction_is_branch_delay_slot = true;
1263
const u32 target = ReadReg(inst.r.rs);
1264
WriteReg(inst.r.rd, g_state.npc);
1265
Branch(target);
1266
}
1267
break;
1268
1269
case InstructionFunct::syscall:
1270
{
1271
RaiseException(Exception::Syscall);
1272
}
1273
break;
1274
1275
case InstructionFunct::break_:
1276
{
1277
RaiseBreakException(Cop0Registers::CAUSE::MakeValueForException(
1278
Exception::BP, g_state.current_instruction_in_branch_delay_slot,
1279
g_state.current_instruction_was_branch_taken, g_state.current_instruction.cop.cop_n),
1280
g_state.current_instruction_pc, g_state.current_instruction.bits);
1281
}
1282
break;
1283
1284
default:
1285
{
1286
RaiseException(Exception::RI);
1287
break;
1288
}
1289
}
1290
}
1291
break;
1292
1293
case InstructionOp::lui:
1294
{
1295
const u32 value = inst.i.imm_zext32() << 16;
1296
WriteReg(inst.i.rt, value);
1297
1298
if constexpr (pgxp_mode >= PGXPMode::CPU)
1299
PGXP::CPU_LUI(inst);
1300
}
1301
break;
1302
1303
case InstructionOp::andi:
1304
{
1305
const u32 rsVal = ReadReg(inst.i.rs);
1306
const u32 new_value = rsVal & inst.i.imm_zext32();
1307
WriteReg(inst.i.rt, new_value);
1308
1309
if constexpr (pgxp_mode >= PGXPMode::CPU)
1310
PGXP::CPU_ANDI(inst, rsVal);
1311
}
1312
break;
1313
1314
case InstructionOp::ori:
1315
{
1316
const u32 rsVal = ReadReg(inst.i.rs);
1317
const u32 imm = inst.i.imm_zext32();
1318
const u32 rtVal = rsVal | imm;
1319
WriteReg(inst.i.rt, rtVal);
1320
1321
if constexpr (pgxp_mode >= PGXPMode::CPU)
1322
PGXP::CPU_ORI(inst, rsVal);
1323
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1324
PGXP::TryMoveImm(inst.r.rd, inst.r.rs, imm);
1325
}
1326
break;
1327
1328
case InstructionOp::xori:
1329
{
1330
const u32 rsVal = ReadReg(inst.i.rs);
1331
const u32 imm = inst.i.imm_zext32();
1332
const u32 new_value = ReadReg(inst.i.rs) ^ imm;
1333
WriteReg(inst.i.rt, new_value);
1334
1335
if constexpr (pgxp_mode >= PGXPMode::CPU)
1336
PGXP::CPU_XORI(inst, rsVal);
1337
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1338
PGXP::TryMoveImm(inst.r.rd, inst.r.rs, imm);
1339
}
1340
break;
1341
1342
case InstructionOp::addi:
1343
{
1344
const u32 rsVal = ReadReg(inst.i.rs);
1345
const u32 imm = inst.i.imm_sext32();
1346
u32 rtVal;
1347
if (AddOverflow(rsVal, imm, &rtVal)) [[unlikely]]
1348
{
1349
RaiseException(Exception::Ov);
1350
return;
1351
}
1352
1353
WriteReg(inst.i.rt, rtVal);
1354
1355
if constexpr (pgxp_mode >= PGXPMode::CPU)
1356
PGXP::CPU_ADDI(inst, rsVal);
1357
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1358
PGXP::TryMoveImm(inst.r.rd, inst.r.rs, imm);
1359
}
1360
break;
1361
1362
case InstructionOp::addiu:
1363
{
1364
const u32 rsVal = ReadReg(inst.i.rs);
1365
const u32 imm = inst.i.imm_sext32();
1366
const u32 rtVal = rsVal + imm;
1367
WriteReg(inst.i.rt, rtVal);
1368
1369
if constexpr (pgxp_mode >= PGXPMode::CPU)
1370
PGXP::CPU_ADDI(inst, rsVal);
1371
else if constexpr (pgxp_mode >= PGXPMode::Memory)
1372
PGXP::TryMoveImm(inst.r.rd, inst.r.rs, imm);
1373
}
1374
break;
1375
1376
case InstructionOp::slti:
1377
{
1378
const u32 rsVal = ReadReg(inst.i.rs);
1379
const u32 result = BoolToUInt32(static_cast<s32>(rsVal) < static_cast<s32>(inst.i.imm_sext32()));
1380
WriteReg(inst.i.rt, result);
1381
1382
if constexpr (pgxp_mode >= PGXPMode::CPU)
1383
PGXP::CPU_SLTI(inst, rsVal);
1384
}
1385
break;
1386
1387
case InstructionOp::sltiu:
1388
{
1389
const u32 result = BoolToUInt32(ReadReg(inst.i.rs) < inst.i.imm_sext32());
1390
WriteReg(inst.i.rt, result);
1391
1392
if constexpr (pgxp_mode >= PGXPMode::CPU)
1393
PGXP::CPU_SLTIU(inst, ReadReg(inst.i.rs));
1394
}
1395
break;
1396
1397
case InstructionOp::lb:
1398
{
1399
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1400
if constexpr (debug)
1401
{
1402
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
1403
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
1404
}
1405
1406
u8 value;
1407
if (!ReadMemoryByte(addr, &value)) [[unlikely]]
1408
return;
1409
1410
const u32 sxvalue = SignExtend32(value);
1411
1412
WriteRegDelayed(inst.i.rt, sxvalue);
1413
1414
if constexpr (pgxp_mode >= PGXPMode::Memory)
1415
PGXP::CPU_LBx(inst, addr, sxvalue);
1416
}
1417
break;
1418
1419
case InstructionOp::lh:
1420
{
1421
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1422
if constexpr (debug)
1423
{
1424
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
1425
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
1426
}
1427
1428
u16 value;
1429
if (!ReadMemoryHalfWord(addr, &value)) [[unlikely]]
1430
return;
1431
1432
const u32 sxvalue = SignExtend32(value);
1433
WriteRegDelayed(inst.i.rt, sxvalue);
1434
1435
if constexpr (pgxp_mode >= PGXPMode::Memory)
1436
PGXP::CPU_LH(inst, addr, sxvalue);
1437
}
1438
break;
1439
1440
case InstructionOp::lw:
1441
{
1442
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1443
if constexpr (debug)
1444
{
1445
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
1446
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
1447
}
1448
1449
u32 value;
1450
if (!ReadMemoryWord(addr, &value)) [[unlikely]]
1451
return;
1452
1453
WriteRegDelayed(inst.i.rt, value);
1454
1455
if constexpr (pgxp_mode >= PGXPMode::Memory)
1456
PGXP::CPU_LW(inst, addr, value);
1457
}
1458
break;
1459
1460
case InstructionOp::lbu:
1461
{
1462
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1463
if constexpr (debug)
1464
{
1465
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
1466
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
1467
}
1468
1469
u8 value;
1470
if (!ReadMemoryByte(addr, &value)) [[unlikely]]
1471
return;
1472
1473
const u32 zxvalue = ZeroExtend32(value);
1474
WriteRegDelayed(inst.i.rt, zxvalue);
1475
1476
if constexpr (pgxp_mode >= PGXPMode::Memory)
1477
PGXP::CPU_LBx(inst, addr, zxvalue);
1478
}
1479
break;
1480
1481
case InstructionOp::lhu:
1482
{
1483
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1484
if constexpr (debug)
1485
{
1486
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
1487
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
1488
}
1489
1490
u16 value;
1491
if (!ReadMemoryHalfWord(addr, &value)) [[unlikely]]
1492
return;
1493
1494
const u32 zxvalue = ZeroExtend32(value);
1495
WriteRegDelayed(inst.i.rt, zxvalue);
1496
1497
if constexpr (pgxp_mode >= PGXPMode::Memory)
1498
PGXP::CPU_LHU(inst, addr, zxvalue);
1499
}
1500
break;
1501
1502
case InstructionOp::lwl:
1503
case InstructionOp::lwr:
1504
{
1505
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1506
const VirtualMemoryAddress aligned_addr = addr & ~UINT32_C(3);
1507
if constexpr (debug)
1508
{
1509
Cop0DataBreakpointCheck<MemoryAccessType::Read>(aligned_addr);
1510
MemoryBreakpointCheck<MemoryAccessType::Read>(aligned_addr);
1511
}
1512
1513
u32 aligned_value;
1514
if (!ReadMemoryWord(aligned_addr, &aligned_value)) [[unlikely]]
1515
return;
1516
1517
// Bypasses load delay. No need to check the old value since this is the delay slot or it's not relevant.
1518
const u32 existing_value = (inst.i.rt == g_state.load_delay_reg) ? g_state.load_delay_value : ReadReg(inst.i.rt);
1519
if constexpr (pgxp_mode >= PGXPMode::Memory)
1520
PGXP::CPU_LWx(inst, addr, existing_value);
1521
1522
const u8 shift = (Truncate8(addr) & u8(3)) * u8(8);
1523
u32 new_value;
1524
if (inst.op == InstructionOp::lwl)
1525
{
1526
const u32 mask = UINT32_C(0x00FFFFFF) >> shift;
1527
new_value = (existing_value & mask) | (aligned_value << (24 - shift));
1528
}
1529
else
1530
{
1531
const u32 mask = UINT32_C(0xFFFFFF00) << (24 - shift);
1532
new_value = (existing_value & mask) | (aligned_value >> shift);
1533
}
1534
1535
WriteRegDelayed(inst.i.rt, new_value);
1536
}
1537
break;
1538
1539
case InstructionOp::sb:
1540
{
1541
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1542
if constexpr (debug)
1543
{
1544
Cop0DataBreakpointCheck<MemoryAccessType::Write>(addr);
1545
MemoryBreakpointCheck<MemoryAccessType::Write>(addr);
1546
}
1547
1548
const u32 value = ReadReg(inst.i.rt);
1549
WriteMemoryByte(addr, value);
1550
1551
if constexpr (pgxp_mode >= PGXPMode::Memory)
1552
PGXP::CPU_SB(inst, addr, value);
1553
}
1554
break;
1555
1556
case InstructionOp::sh:
1557
{
1558
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1559
if constexpr (debug)
1560
{
1561
Cop0DataBreakpointCheck<MemoryAccessType::Write>(addr);
1562
MemoryBreakpointCheck<MemoryAccessType::Write>(addr);
1563
}
1564
1565
const u32 value = ReadReg(inst.i.rt);
1566
WriteMemoryHalfWord(addr, value);
1567
1568
if constexpr (pgxp_mode >= PGXPMode::Memory)
1569
PGXP::CPU_SH(inst, addr, value);
1570
}
1571
break;
1572
1573
case InstructionOp::sw:
1574
{
1575
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1576
if constexpr (debug)
1577
{
1578
Cop0DataBreakpointCheck<MemoryAccessType::Write>(addr);
1579
MemoryBreakpointCheck<MemoryAccessType::Write>(addr);
1580
}
1581
1582
const u32 value = ReadReg(inst.i.rt);
1583
WriteMemoryWord(addr, value);
1584
1585
if constexpr (pgxp_mode >= PGXPMode::Memory)
1586
PGXP::CPU_SW(inst, addr, value);
1587
}
1588
break;
1589
1590
case InstructionOp::swl:
1591
case InstructionOp::swr:
1592
{
1593
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1594
const VirtualMemoryAddress aligned_addr = addr & ~UINT32_C(3);
1595
if constexpr (debug)
1596
{
1597
Cop0DataBreakpointCheck<MemoryAccessType::Write>(aligned_addr);
1598
MemoryBreakpointCheck<MemoryAccessType::Write>(aligned_addr);
1599
}
1600
1601
const u32 reg_value = ReadReg(inst.i.rt);
1602
u32 mem_value;
1603
if (!ReadMemoryWord(aligned_addr, &mem_value)) [[unlikely]]
1604
return;
1605
1606
if constexpr (pgxp_mode >= PGXPMode::Memory)
1607
PGXP::CPU_SWx(inst, addr, reg_value);
1608
1609
const u8 shift = (Truncate8(addr) & u8(3)) * u8(8);
1610
u32 new_value;
1611
if (inst.op == InstructionOp::swl)
1612
{
1613
const u32 mem_mask = UINT32_C(0xFFFFFF00) << shift;
1614
new_value = (mem_value & mem_mask) | (reg_value >> (24 - shift));
1615
}
1616
else
1617
{
1618
const u32 mem_mask = UINT32_C(0x00FFFFFF) >> (24 - shift);
1619
new_value = (mem_value & mem_mask) | (reg_value << shift);
1620
}
1621
1622
WriteMemoryWord(aligned_addr, new_value);
1623
}
1624
break;
1625
1626
case InstructionOp::j:
1627
{
1628
g_state.next_instruction_is_branch_delay_slot = true;
1629
Branch((g_state.pc & UINT32_C(0xF0000000)) | (inst.j.target << 2));
1630
}
1631
break;
1632
1633
case InstructionOp::jal:
1634
{
1635
WriteReg(Reg::ra, g_state.npc);
1636
g_state.next_instruction_is_branch_delay_slot = true;
1637
Branch((g_state.pc & UINT32_C(0xF0000000)) | (inst.j.target << 2));
1638
}
1639
break;
1640
1641
case InstructionOp::beq:
1642
{
1643
// We're still flagged as a branch delay slot even if the branch isn't taken.
1644
g_state.next_instruction_is_branch_delay_slot = true;
1645
const bool branch = (ReadReg(inst.i.rs) == ReadReg(inst.i.rt));
1646
if (branch)
1647
Branch(g_state.pc + (inst.i.imm_sext32() << 2));
1648
}
1649
break;
1650
1651
case InstructionOp::bne:
1652
{
1653
g_state.next_instruction_is_branch_delay_slot = true;
1654
const bool branch = (ReadReg(inst.i.rs) != ReadReg(inst.i.rt));
1655
if (branch)
1656
Branch(g_state.pc + (inst.i.imm_sext32() << 2));
1657
}
1658
break;
1659
1660
case InstructionOp::bgtz:
1661
{
1662
g_state.next_instruction_is_branch_delay_slot = true;
1663
const bool branch = (static_cast<s32>(ReadReg(inst.i.rs)) > 0);
1664
if (branch)
1665
Branch(g_state.pc + (inst.i.imm_sext32() << 2));
1666
}
1667
break;
1668
1669
case InstructionOp::blez:
1670
{
1671
g_state.next_instruction_is_branch_delay_slot = true;
1672
const bool branch = (static_cast<s32>(ReadReg(inst.i.rs)) <= 0);
1673
if (branch)
1674
Branch(g_state.pc + (inst.i.imm_sext32() << 2));
1675
}
1676
break;
1677
1678
case InstructionOp::b:
1679
{
1680
g_state.next_instruction_is_branch_delay_slot = true;
1681
const u8 rt = static_cast<u8>(inst.i.rt.GetValue());
1682
1683
// bgez is the inverse of bltz, so simply do ltz and xor the result
1684
const bool bgez = ConvertToBoolUnchecked(rt & u8(1));
1685
const bool branch = (static_cast<s32>(ReadReg(inst.i.rs)) < 0) ^ bgez;
1686
1687
// register is still linked even if the branch isn't taken
1688
const bool link = (rt & u8(0x1E)) == u8(0x10);
1689
if (link)
1690
WriteReg(Reg::ra, g_state.npc);
1691
1692
if (branch)
1693
Branch(g_state.pc + (inst.i.imm_sext32() << 2));
1694
}
1695
break;
1696
1697
case InstructionOp::cop0:
1698
{
1699
if (InUserMode() && !g_state.cop0_regs.sr.CU0)
1700
{
1701
WARNING_LOG("Coprocessor 0 not present in user mode");
1702
RaiseException(Exception::CpU);
1703
return;
1704
}
1705
1706
if (inst.cop.IsCommonInstruction())
1707
{
1708
switch (inst.cop.CommonOp())
1709
{
1710
case CopCommonInstruction::mfcn:
1711
{
1712
u32 value;
1713
1714
switch (static_cast<Cop0Reg>(inst.r.rd.GetValue()))
1715
{
1716
case Cop0Reg::BPC:
1717
value = g_state.cop0_regs.BPC;
1718
break;
1719
1720
case Cop0Reg::BPCM:
1721
value = g_state.cop0_regs.BPCM;
1722
break;
1723
1724
case Cop0Reg::BDA:
1725
value = g_state.cop0_regs.BDA;
1726
break;
1727
1728
case Cop0Reg::BDAM:
1729
value = g_state.cop0_regs.BDAM;
1730
break;
1731
1732
case Cop0Reg::DCIC:
1733
value = g_state.cop0_regs.dcic.bits;
1734
break;
1735
1736
case Cop0Reg::JUMPDEST:
1737
value = g_state.cop0_regs.TAR;
1738
break;
1739
1740
case Cop0Reg::BadVaddr:
1741
value = g_state.cop0_regs.BadVaddr;
1742
break;
1743
1744
case Cop0Reg::SR:
1745
value = g_state.cop0_regs.sr.bits;
1746
break;
1747
1748
case Cop0Reg::CAUSE:
1749
value = g_state.cop0_regs.cause.bits;
1750
break;
1751
1752
case Cop0Reg::EPC:
1753
value = g_state.cop0_regs.EPC;
1754
break;
1755
1756
case Cop0Reg::PRID:
1757
value = g_state.cop0_regs.PRID;
1758
break;
1759
1760
default:
1761
RaiseException(Exception::RI);
1762
return;
1763
}
1764
1765
WriteRegDelayed(inst.r.rt, value);
1766
1767
if constexpr (pgxp_mode == PGXPMode::CPU)
1768
PGXP::CPU_MFC0(inst, value);
1769
}
1770
break;
1771
1772
case CopCommonInstruction::mtcn:
1773
{
1774
u32 value = ReadReg(inst.r.rt);
1775
[[maybe_unused]] const u32 orig_value = value;
1776
1777
switch (static_cast<Cop0Reg>(inst.r.rd.GetValue()))
1778
{
1779
case Cop0Reg::BPC:
1780
{
1781
g_state.cop0_regs.BPC = value;
1782
DEV_LOG("COP0 BPC <- {:08X}", value);
1783
}
1784
break;
1785
1786
case Cop0Reg::BPCM:
1787
{
1788
g_state.cop0_regs.BPCM = value;
1789
DEV_LOG("COP0 BPCM <- {:08X}", value);
1790
if (UpdateDebugDispatcherFlag())
1791
ExitExecution();
1792
}
1793
break;
1794
1795
case Cop0Reg::BDA:
1796
{
1797
g_state.cop0_regs.BDA = value;
1798
DEV_LOG("COP0 BDA <- {:08X}", value);
1799
}
1800
break;
1801
1802
case Cop0Reg::BDAM:
1803
{
1804
g_state.cop0_regs.BDAM = value;
1805
DEV_LOG("COP0 BDAM <- {:08X}", value);
1806
}
1807
break;
1808
1809
case Cop0Reg::DCIC:
1810
{
1811
g_state.cop0_regs.dcic.bits = (g_state.cop0_regs.dcic.bits & ~Cop0Registers::DCIC::WRITE_MASK) |
1812
(value & Cop0Registers::DCIC::WRITE_MASK);
1813
DEV_LOG("COP0 DCIC <- {:08X} (now {:08X})", value, g_state.cop0_regs.dcic.bits);
1814
value = g_state.cop0_regs.dcic.bits;
1815
if (UpdateDebugDispatcherFlag())
1816
ExitExecution();
1817
}
1818
break;
1819
1820
case Cop0Reg::SR:
1821
{
1822
g_state.cop0_regs.sr.bits = (g_state.cop0_regs.sr.bits & ~Cop0Registers::SR::WRITE_MASK) |
1823
(value & Cop0Registers::SR::WRITE_MASK);
1824
DEBUG_LOG("COP0 SR <- {:08X} (now {:08X})", value, g_state.cop0_regs.sr.bits);
1825
value = g_state.cop0_regs.sr.bits;
1826
UpdateMemoryPointers();
1827
CheckForPendingInterrupt();
1828
}
1829
break;
1830
1831
case Cop0Reg::CAUSE:
1832
{
1833
g_state.cop0_regs.cause.bits = (g_state.cop0_regs.cause.bits & ~Cop0Registers::CAUSE::WRITE_MASK) |
1834
(value & Cop0Registers::CAUSE::WRITE_MASK);
1835
DEBUG_LOG("COP0 CAUSE <- {:08X} (now {:08X})", value, g_state.cop0_regs.cause.bits);
1836
value = g_state.cop0_regs.cause.bits;
1837
CheckForPendingInterrupt();
1838
}
1839
break;
1840
1841
case Cop0Reg::JUMPDEST:
1842
case Cop0Reg::BadVaddr:
1843
case Cop0Reg::EPC:
1844
{
1845
WARNING_LOG("Ignoring write to COP0 register {} value 0x{:08X}",
1846
GetCop0RegisterName(static_cast<u8>(inst.r.rd.GetValue())), value);
1847
}
1848
break;
1849
1850
[[unlikely]] default:
1851
RaiseException(Exception::RI);
1852
return;
1853
}
1854
1855
if constexpr (pgxp_mode == PGXPMode::CPU)
1856
PGXP::CPU_MTC0(inst, value, orig_value);
1857
}
1858
break;
1859
1860
default:
1861
[[unlikely]] ERROR_LOG("Unhandled instruction at {:08X}: {:08X}", g_state.current_instruction_pc,
1862
inst.bits);
1863
break;
1864
}
1865
}
1866
else
1867
{
1868
switch (inst.cop.Cop0Op())
1869
{
1870
case Cop0Instruction::rfe:
1871
{
1872
// restore mode
1873
g_state.cop0_regs.sr.mode_bits =
1874
(g_state.cop0_regs.sr.mode_bits & UINT32_C(0b110000)) | (g_state.cop0_regs.sr.mode_bits >> 2);
1875
CheckForPendingInterrupt();
1876
}
1877
break;
1878
1879
case Cop0Instruction::tlbr:
1880
case Cop0Instruction::tlbwi:
1881
case Cop0Instruction::tlbwr:
1882
case Cop0Instruction::tlbp:
1883
RaiseException(Exception::RI);
1884
return;
1885
1886
default:
1887
[[unlikely]] ERROR_LOG("Unhandled instruction at {:08X}: {:08X}", g_state.current_instruction_pc,
1888
inst.bits);
1889
break;
1890
}
1891
}
1892
}
1893
break;
1894
1895
case InstructionOp::cop2:
1896
{
1897
if (!g_state.cop0_regs.sr.CE2) [[unlikely]]
1898
{
1899
WARNING_LOG("Coprocessor 2 not enabled");
1900
RaiseException(Exception::CpU);
1901
return;
1902
}
1903
1904
if (inst.cop.IsCommonInstruction())
1905
{
1906
// TODO: Combine with cop0.
1907
switch (inst.cop.CommonOp())
1908
{
1909
case CopCommonInstruction::cfcn:
1910
{
1911
StallUntilGTEComplete();
1912
1913
const u32 value = GTE::ReadRegister(static_cast<u32>(inst.r.rd.GetValue()) + 32);
1914
WriteRegDelayed(inst.r.rt, value);
1915
1916
if constexpr (pgxp_mode >= PGXPMode::Memory)
1917
PGXP::CPU_MFC2(inst, value);
1918
}
1919
break;
1920
1921
case CopCommonInstruction::ctcn:
1922
{
1923
const u32 value = ReadReg(inst.r.rt);
1924
GTE::WriteRegister(static_cast<u32>(inst.r.rd.GetValue()) + 32, value);
1925
1926
if constexpr (pgxp_mode >= PGXPMode::Memory)
1927
PGXP::CPU_MTC2(inst, value);
1928
}
1929
break;
1930
1931
case CopCommonInstruction::mfcn:
1932
{
1933
StallUntilGTEComplete();
1934
1935
const u32 value = GTE::ReadRegister(static_cast<u32>(inst.r.rd.GetValue()));
1936
WriteRegDelayed(inst.r.rt, value);
1937
1938
if constexpr (pgxp_mode >= PGXPMode::Memory)
1939
PGXP::CPU_MFC2(inst, value);
1940
}
1941
break;
1942
1943
case CopCommonInstruction::mtcn:
1944
{
1945
const u32 value = ReadReg(inst.r.rt);
1946
GTE::WriteRegister(static_cast<u32>(inst.r.rd.GetValue()), value);
1947
1948
if constexpr (pgxp_mode >= PGXPMode::Memory)
1949
PGXP::CPU_MTC2(inst, value);
1950
}
1951
break;
1952
1953
default:
1954
[[unlikely]] ERROR_LOG("Unhandled instruction at {:08X}: {:08X}", g_state.current_instruction_pc,
1955
inst.bits);
1956
break;
1957
}
1958
}
1959
else
1960
{
1961
StallUntilGTEComplete();
1962
GTE::ExecuteInstruction(inst.bits);
1963
}
1964
}
1965
break;
1966
1967
case InstructionOp::lwc2:
1968
{
1969
if (!g_state.cop0_regs.sr.CE2) [[unlikely]]
1970
{
1971
WARNING_LOG("Coprocessor 2 not enabled");
1972
RaiseException(Exception::CpU);
1973
return;
1974
}
1975
1976
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
1977
if constexpr (debug)
1978
{
1979
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
1980
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
1981
}
1982
1983
u32 value;
1984
if (!ReadMemoryWord(addr, &value))
1985
return;
1986
1987
GTE::WriteRegister(ZeroExtend32(static_cast<u8>(inst.i.rt.GetValue())), value);
1988
1989
if constexpr (pgxp_mode >= PGXPMode::Memory)
1990
PGXP::CPU_LWC2(inst, addr, value);
1991
}
1992
break;
1993
1994
case InstructionOp::swc2:
1995
{
1996
if (!g_state.cop0_regs.sr.CE2) [[unlikely]]
1997
{
1998
WARNING_LOG("Coprocessor 2 not enabled");
1999
RaiseException(Exception::CpU);
2000
return;
2001
}
2002
2003
StallUntilGTEComplete();
2004
2005
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
2006
if constexpr (debug)
2007
{
2008
Cop0DataBreakpointCheck<MemoryAccessType::Write>(addr);
2009
MemoryBreakpointCheck<MemoryAccessType::Write>(addr);
2010
}
2011
2012
const u32 value = GTE::ReadRegister(ZeroExtend32(static_cast<u8>(inst.i.rt.GetValue())));
2013
WriteMemoryWord(addr, value);
2014
2015
if constexpr (pgxp_mode >= PGXPMode::Memory)
2016
PGXP::CPU_SWC2(inst, addr, value);
2017
}
2018
break;
2019
2020
// cop1/cop3 are essentially no-ops
2021
case InstructionOp::cop1:
2022
case InstructionOp::cop3:
2023
{
2024
}
2025
break;
2026
2027
case InstructionOp::lwc0:
2028
case InstructionOp::lwc1:
2029
case InstructionOp::lwc3:
2030
{
2031
// todo: check enable
2032
// lwc0/1/3 should still perform the memory read, but discard the result
2033
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
2034
if constexpr (debug)
2035
{
2036
Cop0DataBreakpointCheck<MemoryAccessType::Read>(addr);
2037
MemoryBreakpointCheck<MemoryAccessType::Read>(addr);
2038
}
2039
2040
u32 value;
2041
ReadMemoryWord(addr, &value);
2042
}
2043
break;
2044
2045
break;
2046
case InstructionOp::swc0:
2047
case InstructionOp::swc1:
2048
case InstructionOp::swc3:
2049
{
2050
// todo: check enable
2051
// lwc0/1/3 should still perform the memory read, but discard the result
2052
const VirtualMemoryAddress addr = ReadReg(inst.i.rs) + inst.i.imm_sext32();
2053
if constexpr (debug)
2054
{
2055
Cop0DataBreakpointCheck<MemoryAccessType::Write>(addr);
2056
MemoryBreakpointCheck<MemoryAccessType::Write>(addr);
2057
}
2058
2059
WriteMemoryWord(addr, 0);
2060
}
2061
break;
2062
2063
// everything else is reserved/invalid
2064
[[unlikely]]
2065
default:
2066
{
2067
u32 ram_value;
2068
if (SafeReadInstruction(g_state.current_instruction_pc, &ram_value) &&
2069
ram_value != g_state.current_instruction.bits) [[unlikely]]
2070
{
2071
ERROR_LOG("Stale icache at 0x{:08X} - ICache: {:08X} RAM: {:08X}", g_state.current_instruction_pc,
2072
g_state.current_instruction.bits, ram_value);
2073
g_state.current_instruction.bits = ram_value;
2074
goto restart_instruction;
2075
}
2076
2077
RaiseException(Exception::RI);
2078
}
2079
break;
2080
}
2081
}
2082
2083
void CPU::DispatchInterrupt()
{
  // Services a pending hardware interrupt: fetches the instruction at the current PC, lets the GTE run if that
  // instruction is a GTE command, then raises the INT exception so execution transfers to the handler.
  //
  // The GTE is a co-processor, therefore it executes the instruction even if we're servicing an exception.
  // The exception handlers should recognize this and increment the PC if the EPC was a cop2 instruction.
  // NOTE(review): SafeReadInstruction()'s result is ignored here — on a fetch failure next_instruction keeps its
  // previous bits; confirm this is the intended behavior for faulting PCs.
  SafeReadInstruction(g_state.pc, &g_state.next_instruction.bits);
  if (g_state.next_instruction.op == InstructionOp::cop2 && !g_state.next_instruction.cop.IsCommonInstruction())
  {
    StallUntilGTEComplete();
    GTE::ExecuteInstruction(g_state.next_instruction.bits);
  }

  // Interrupt raising occurs before the start of the instruction. CAUSE records whether the interrupted
  // instruction sat in a branch delay slot (and whether the branch was taken) so the handler can resume correctly.
  RaiseException(
    Cop0Registers::CAUSE::MakeValueForException(Exception::INT, g_state.next_instruction_is_branch_delay_slot,
                                                g_state.branch_was_taken, g_state.next_instruction.cop.cop_n),
    g_state.pc);

  // Fix up downcount, the pending IRQ set it to zero.
  TimingEvents::UpdateCPUDowncount();
}
2103
2104
CPUExecutionMode CPU::GetCurrentExecutionMode()
{
  // Report the mode the core is actually running in; this can differ from the configured
  // mode while the debug dispatcher forces the interpreter.
  const CPUExecutionMode active_mode = s_locals.current_execution_mode;
  return active_mode;
}
2108
2109
bool CPU::UpdateDebugDispatcherFlag()
2110
{
2111
const bool has_any_breakpoints = (HasAnyBreakpoints() || s_locals.break_type == ExecutionBreakType::SingleStep);
2112
2113
const auto& dcic = g_state.cop0_regs.dcic;
2114
const bool has_cop0_breakpoints = dcic.super_master_enable_1 && dcic.super_master_enable_2 &&
2115
dcic.execution_breakpoint_enable && IsCop0ExecutionBreakpointUnmasked();
2116
2117
const bool use_debug_dispatcher =
2118
has_any_breakpoints || has_cop0_breakpoints || s_locals.trace_to_log ||
2119
(g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter && g_settings.bios_tty_logging);
2120
if (use_debug_dispatcher == g_state.using_debug_dispatcher)
2121
return false;
2122
2123
DEV_LOG("{} debug dispatcher", use_debug_dispatcher ? "Now using" : "No longer using");
2124
g_state.using_debug_dispatcher = use_debug_dispatcher;
2125
return true;
2126
}
2127
2128
// Switches between interpreter and recompiler execution when the effective mode changes
// (the debug dispatcher forces the interpreter). Handles the pipeline fixup required in
// each direction, including draining a branch delay slot before entering the recompiler.
void CPU::CheckForExecutionModeChange()
{
  // Currently, any breakpoints require the interpreter.
  const CPUExecutionMode new_execution_mode =
    (g_state.using_debug_dispatcher ? CPUExecutionMode::Interpreter : g_settings.cpu_execution_mode);
  if (s_locals.current_execution_mode == new_execution_mode) [[likely]]
  {
    DebugAssert(g_state.using_interpreter == (s_locals.current_execution_mode == CPUExecutionMode::Interpreter));
    return;
  }

  WARNING_LOG("Execution mode changed from {} to {}",
              Settings::GetCPUExecutionModeName(s_locals.current_execution_mode),
              Settings::GetCPUExecutionModeName(new_execution_mode));

  // Clear bus error flag, it can get set in the rec and we don't want to fire it later in the int.
  g_state.bus_error = false;

  const bool new_interpreter = (new_execution_mode == CPUExecutionMode::Interpreter);
  if (g_state.using_interpreter != new_interpreter)
  {
    // Have to clear out the icache too, only the tags are valid in the recs.
    ClearICache();

    if (new_interpreter)
    {
      // Switching to interpreter. Set up the pipeline.
      // We'll also need to fetch the next instruction to execute.
      if (!SafeReadInstruction(g_state.pc, &g_state.next_instruction.bits)) [[unlikely]]
      {
        g_state.next_instruction.bits = 0;
        ERROR_LOG("Failed to read current instruction from 0x{:08X}", g_state.pc);
      }

      g_state.npc = g_state.pc + sizeof(Instruction);
    }
    else
    {
      // Switching to recompiler. We can't start a rec block in a branch delay slot, so we need to execute the
      // instruction if we're currently in one.
      if (g_state.next_instruction_is_branch_delay_slot) [[unlikely]]
      {
        while (g_state.next_instruction_is_branch_delay_slot)
        {
          WARNING_LOG("EXECMODE: Executing instruction at 0x{:08X} because it is in a branch delay slot.", g_state.pc);
          // Run exactly one instruction on the debug dispatcher; it longjmps back here when done.
          if (fastjmp_set(&s_locals.exit_jmp_buf) == 0)
          {
            s_locals.break_type = ExecutionBreakType::ExecuteOneInstruction;
            g_state.using_debug_dispatcher = true;
            ExecuteInterpreter();
          }
        }

        // Need to restart the whole process again, because the branch slot could change the debug flag.
        UpdateDebugDispatcherFlag();
        CheckForExecutionModeChange();
        return;
      }
    }
  }

  s_locals.current_execution_mode = new_execution_mode;
  g_state.using_interpreter = new_interpreter;

  // Wipe out code cache when switching modes.
  if (!new_interpreter)
    CPU::CodeCache::Reset();
}
// Unwinds out of the dispatcher back to CPU::Execute() (or whoever last set exit_jmp_buf)
// via longjmp. Never returns to the caller.
[[noreturn]] void CPU::ExitExecution()
{
  // can't exit while running events without messing things up
  DebugAssert(!TimingEvents::IsRunningEvents());
  fastjmp_jmp(&s_locals.exit_jmp_buf, 1);
}
bool CPU::HasAnyBreakpoints()
2205
{
2206
return (GetBreakpointList(BreakpointType::Execute).size() + GetBreakpointList(BreakpointType::Read).size() +
2207
GetBreakpointList(BreakpointType::Write).size()) > 0;
2208
}
2209
2210
// Returns the breakpoint list for the given type; lists are stored one-per-type,
// indexed by the enum's numeric value.
ALWAYS_INLINE CPU::BreakpointList& CPU::GetBreakpointList(BreakpointType type)
{
  return s_locals.breakpoints[static_cast<size_t>(type)];
}
// Returns a human-readable name for a breakpoint type, used in log/debugger messages.
// The table parallels the BreakpointType enum ordering.
const char* CPU::GetBreakpointTypeName(BreakpointType type)
{
  static constexpr const char* names[static_cast<u32>(BreakpointType::Count)] = {
    "Execute",
    "Read",
    "Write",
  };
  return names[static_cast<size_t>(type)];
}
// Returns true if an enabled breakpoint of the given type exists at address. Addresses are
// compared with the segment bits masked off so KUSEG/KSEG0/KSEG1 mirrors all match.
// Note: a match also bumps the breakpoint's hit counter.
bool CPU::HasBreakpointAtAddress(BreakpointType type, VirtualMemoryAddress address)
{
  const VirtualMemoryAddress masked_address = (address & 0x0FFFFFFFu);
  for (Breakpoint& bp : GetBreakpointList(type))
  {
    if (!bp.enabled || (bp.address & 0x0FFFFFFFu) != masked_address)
      continue;

    bp.hit_count++;
    return true;
  }

  return false;
}
// Returns a flattened copy of all breakpoint lists, optionally filtering out internal
// auto-clear (step) breakpoints and callback breakpoints.
CPU::BreakpointList CPU::CopyBreakpointList(bool include_auto_clear, bool include_callbacks)
{
  // Reserve for the worst case up front so at most one allocation happens.
  size_t max_size = 0;
  for (const BreakpointList& bplist : s_locals.breakpoints)
    max_size += bplist.size();

  BreakpointList result;
  result.reserve(max_size);

  for (const BreakpointList& bplist : s_locals.breakpoints)
  {
    for (const Breakpoint& bp : bplist)
    {
      const bool filtered = (bp.callback && !include_callbacks) || (bp.auto_clear && !include_auto_clear);
      if (!filtered)
        result.push_back(bp);
    }
  }

  return result;
}
// Registers a breakpoint of the given type at address. Returns false if one already exists
// there. auto_clear breakpoints are internal (used for stepping): unnumbered and removed on
// first hit; only user-visible breakpoints produce a debugger message.
bool CPU::AddBreakpoint(BreakpointType type, VirtualMemoryAddress address, bool auto_clear, bool enabled)
{
  if (HasBreakpointAtAddress(type, address))
    return false;

  INFO_LOG("Adding {} breakpoint at {:08X}, auto clear = {}", GetBreakpointTypeName(type), address,
           static_cast<unsigned>(auto_clear));

  // Auto-clear breakpoints don't consume a user-visible breakpoint number.
  const u32 number = auto_clear ? 0 : s_locals.breakpoint_counter++;
  GetBreakpointList(type).push_back(Breakpoint{address, nullptr, number, 0, type, auto_clear, enabled});

  // Breakpoints need the debug dispatcher; kick the core out so it re-enters with it active.
  if (UpdateDebugDispatcherFlag())
    System::InterruptExecution();

  if (!auto_clear)
    Host::ReportDebuggerEvent(DebuggerEvent::Message, fmt::format("Added breakpoint at 0x{:08X}.", address));

  return true;
}
// Registers a callback breakpoint: instead of pausing, the callback runs on hit and its
// return value decides whether the breakpoint stays registered. Returns false if a
// breakpoint already exists at the address.
bool CPU::AddBreakpointWithCallback(BreakpointType type, VirtualMemoryAddress address, BreakpointCallback callback)
{
  if (HasBreakpointAtAddress(type, address))
    return false;

  INFO_LOG("Adding {} breakpoint with callback at {:08X}", GetBreakpointTypeName(type), address);

  // Callback breakpoints are unnumbered, never auto-cleared, and always enabled.
  GetBreakpointList(type).push_back(Breakpoint{address, callback, 0, 0, type, false, true});

  if (UpdateDebugDispatcherFlag())
    System::InterruptExecution();
  return true;
}
// Enables or disables an existing breakpoint (matched by exact address). Returns false if
// no breakpoint of the given type exists at that address.
bool CPU::SetBreakpointEnabled(BreakpointType type, VirtualMemoryAddress address, bool enabled)
{
  BreakpointList& bplist = GetBreakpointList(type);
  const auto iter =
    std::find_if(bplist.begin(), bplist.end(), [address](const Breakpoint& bp) { return bp.address == address; });
  if (iter == bplist.end())
    return false;

  Host::ReportDebuggerEvent(DebuggerEvent::Message,
                            fmt::format("{} {} breakpoint at 0x{:08X}.", enabled ? "Enabled" : "Disabled",
                                        GetBreakpointTypeName(type), address));
  iter->enabled = enabled;

  // The dispatcher requirement may change when the last enabled breakpoint is toggled.
  if (UpdateDebugDispatcherFlag())
    System::InterruptExecution();

  // Forget the "just stopped here" PC when disabling it, so re-enabling can fire again.
  if (address == s_locals.last_breakpoint_check_pc && !enabled)
    s_locals.last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;

  return true;
}
// Removes an existing breakpoint (matched by exact address). Returns false if no
// breakpoint of the given type exists at that address.
bool CPU::RemoveBreakpoint(BreakpointType type, VirtualMemoryAddress address)
{
  BreakpointList& bplist = GetBreakpointList(type);
  const auto iter =
    std::find_if(bplist.begin(), bplist.end(), [address](const Breakpoint& bp) { return bp.address == address; });
  if (iter == bplist.end())
    return false;

  Host::ReportDebuggerEvent(DebuggerEvent::Message,
                            fmt::format("Removed {} breakpoint at 0x{:08X}.", GetBreakpointTypeName(type), address));

  bplist.erase(iter);

  // Removing the last breakpoint may let us drop the debug dispatcher.
  if (UpdateDebugDispatcherFlag())
    System::InterruptExecution();

  // Clear the "just stopped here" PC if it referred to this breakpoint.
  if (address == s_locals.last_breakpoint_check_pc)
    s_locals.last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;

  return true;
}
void CPU::ClearBreakpoints()
2342
{
2343
for (BreakpointList& bplist : s_locals.breakpoints)
2344
bplist.clear();
2345
s_locals.breakpoint_counter = 0;
2346
s_locals.last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;
2347
if (UpdateDebugDispatcherFlag())
2348
System::InterruptExecution();
2349
}
2350
2351
bool CPU::AddStepOverBreakpoint()
2352
{
2353
u32 bp_pc = g_state.pc;
2354
2355
Instruction inst;
2356
if (!SafeReadInstruction(bp_pc, &inst.bits))
2357
return false;
2358
2359
bp_pc += sizeof(Instruction);
2360
2361
if (!IsCallInstruction(inst))
2362
{
2363
Host::ReportDebuggerEvent(DebuggerEvent::Message, fmt::format("0x{:08X} is not a call instruction.", g_state.pc));
2364
return false;
2365
}
2366
2367
if (!SafeReadInstruction(bp_pc, &inst.bits))
2368
return false;
2369
2370
if (IsBranchInstruction(inst))
2371
{
2372
Host::ReportDebuggerEvent(DebuggerEvent::Message,
2373
fmt::format("Can't step over double branch at 0x{:08X}", g_state.pc));
2374
return false;
2375
}
2376
2377
// skip the delay slot
2378
bp_pc += sizeof(Instruction);
2379
2380
Host::ReportDebuggerEvent(DebuggerEvent::Message, fmt::format("Stepping over to 0x{:08X}.", bp_pc));
2381
2382
return AddBreakpoint(BreakpointType::Execute, bp_pc, true);
2383
}
2384
2385
// Implements "step out": scans forward from the current PC for a return instruction and
// places an auto-clearing execute breakpoint on it. Gives up (with a debugger message)
// after max_instructions_to_search instructions or on an unreadable address.
bool CPU::AddStepOutBreakpoint(u32 max_instructions_to_search)
{
  // find the branch-to-ra instruction.
  u32 search_pc = g_state.pc;
  for (u32 count = 0; count < max_instructions_to_search; count++)
  {
    search_pc += sizeof(Instruction);

    Instruction inst;
    if (!SafeReadInstruction(search_pc, &inst.bits))
    {
      Host::ReportDebuggerEvent(
        DebuggerEvent::Message,
        fmt::format("Instruction read failed at {:08X} while searching for function end.", search_pc));
      return false;
    }

    if (IsReturnInstruction(inst))
    {
      Host::ReportDebuggerEvent(DebuggerEvent::Message, fmt::format("Stepping out to 0x{:08X}.", search_pc));
      return AddBreakpoint(BreakpointType::Execute, search_pc, true);
    }
  }

  Host::ReportDebuggerEvent(DebuggerEvent::Message,
                            fmt::format("No return instruction found after {} instructions for step-out at {:08X}.",
                                        max_instructions_to_search, g_state.pc));

  return false;
}
// Scans the breakpoint list of the given type for an enabled breakpoint matching address
// (segment bits masked, so all KUSEG/KSEG mirrors match). Callback breakpoints run their
// callback (and are unregistered if it returns false); plain breakpoints pause the system.
// Returns true only when a non-callback breakpoint hit and the system was paused.
ALWAYS_INLINE_RELEASE bool CPU::CheckBreakpointList(BreakpointType type, VirtualMemoryAddress address)
{
  BreakpointList& bplist = GetBreakpointList(type);
  size_t count = bplist.size();
  if (count == 0) [[likely]]
    return false;

  for (size_t i = 0; i < count;)
  {
    Breakpoint& bp = bplist[i];
    if (!bp.enabled || (bp.address & 0x0FFFFFFFu) != (address & 0x0FFFFFFFu))
    {
      i++;
      continue;
    }

    bp.hit_count++;

    const u32 pc = g_state.pc;

    if (bp.callback)
    {
      // if callback returns false, the bp is no longer recorded
      // BUGFIX: pass the list's type through instead of hardcoding BreakpointType::Execute;
      // this function also services Read/Write breakpoints (see MemoryBreakpointCheck).
      if (!bp.callback(type, pc, address))
      {
        bplist.erase(bplist.begin() + i);
        count--;
        UpdateDebugDispatcherFlag();
      }
      else
      {
        i++;
      }
    }
    else
    {
      System::PauseSystem(true);

      TinyString msg;
      if (bp.auto_clear)
      {
        // Internal stepping breakpoint: consume it silently (generic message, no number).
        msg.format("Stopped execution at 0x{:08X}.", pc);
        Host::ReportDebuggerEvent(DebuggerEvent::Message, msg);
        bplist.erase(bplist.begin() + i);
        count--;
        UpdateDebugDispatcherFlag();
      }
      else
      {
        msg.format("Hit {} breakpoint {} at 0x{:08X}, Hit Count {}.", GetBreakpointTypeName(type), bp.number, address,
                   bp.hit_count);
        Host::ReportDebuggerEvent(DebuggerEvent::BreakpointHit, msg);
        i++;
      }

      return true;
    }
  }

  return false;
}
// Checks execute breakpoints before dispatching the instruction at pc. Exits the dispatch
// loop (via ExitExecution) when a breakpoint pauses the system.
ALWAYS_INLINE_RELEASE void CPU::ExecutionBreakpointCheck(u32 pc)
{
  // Fast path: nothing to check when no execute breakpoints are registered.
  if (s_locals.breakpoints[static_cast<u32>(BreakpointType::Execute)].empty()) [[likely]]
    return;

  if (pc == s_locals.last_breakpoint_check_pc || s_locals.break_type == ExecutionBreakType::ExecuteOneInstruction)
    [[unlikely]]
  {
    // we don't want to trigger the same breakpoint which just paused us repeatedly.
    return;
  }

  s_locals.last_breakpoint_check_pc = pc;

  if (CheckBreakpointList(BreakpointType::Execute, pc)) [[unlikely]]
  {
    // System was paused by the breakpoint; unwind out of the dispatcher.
    s_locals.break_type = ExecutionBreakType::None;
    ExitExecution();
  }
}
template<MemoryAccessType type>
2500
ALWAYS_INLINE_RELEASE void CPU::MemoryBreakpointCheck(VirtualMemoryAddress address)
2501
{
2502
const BreakpointType bptype = (type == MemoryAccessType::Read) ? BreakpointType::Read : BreakpointType::Write;
2503
if (CheckBreakpointList(bptype, address)) [[unlikely]]
2504
s_locals.break_type = ExecutionBreakType::Breakpoint;
2505
}
2506
2507
// Core interpreter dispatch loop, statically specialized on PGXP mode and on debug support
// (breakpoints, COP0 hardware breakpoints, tracing, BIOS syscall hooks). Runs until
// something unwinds via ExitExecution(); never returns normally.
template<PGXPMode pgxp_mode, bool debug>
[[noreturn]] void CPU::ExecuteImpl()
{
  // Service any events that are already due before entering the loop.
  if (g_state.pending_ticks >= g_state.downcount)
    TimingEvents::RunEvents();

  for (;;)
  {
    do
    {
      if constexpr (debug)
        ExecutionBreakpointCheck(g_state.pc);

      g_state.pending_ticks++;

      // now executing the instruction we previously fetched
      g_state.current_instruction.bits = g_state.next_instruction.bits;
      g_state.current_instruction_pc = g_state.pc;
      g_state.current_instruction_in_branch_delay_slot = g_state.next_instruction_is_branch_delay_slot;
      g_state.current_instruction_was_branch_taken = g_state.branch_was_taken;
      g_state.next_instruction_is_branch_delay_slot = false;
      g_state.branch_was_taken = false;

      if constexpr (debug)
      {
        // COP0 (DCIC/BPC) hardware execution breakpoint; on match the instruction is skipped
        // here and the debug exception path takes over.
        if (Cop0ExecutionBreakpointCheck(g_state.current_instruction_pc))
          continue;
      }

      // fetch the next instruction - even if this fails, it'll still refetch on the flush so we can continue
      if (!FetchInstruction())
        continue;

      // trace functionality
      if constexpr (debug)
      {
        if (s_locals.trace_to_log)
          LogInstruction(g_state.current_instruction.bits, g_state.current_instruction_pc, true);

        // handle all mirrors of the syscall trampoline. will catch 200000A0 etc, but those aren't fetchable anyway
        const u32 masked_pc = (g_state.current_instruction_pc & KSEG_MASK);
        if (masked_pc == 0xA0) [[unlikely]]
          HandleA0Syscall();
        else if (masked_pc == 0xB0) [[unlikely]]
          HandleB0Syscall();
      }

#if 0 // GTE flag test debugging
      if (g_state.m_current_instruction_pc == 0x8002cdf4)
      {
        if (g_state.m_regs.v1 != g_state.m_regs.v0)
          printf("Got %08X Expected? %08X\n", g_state.m_regs.v1, g_state.m_regs.v0);
      }
#endif

      // execute the instruction we previously fetched
      ExecuteInstruction<pgxp_mode, debug>();

      // next load delay
      UpdateLoadDelay();

      if constexpr (debug)
      {
        // A pending break (single-step, execute-one, or a memory breakpoint hit during the
        // instruction) ends the dispatch loop.
        if (s_locals.break_type != ExecutionBreakType::None) [[unlikely]]
        {
          const ExecutionBreakType break_type = std::exchange(s_locals.break_type, ExecutionBreakType::None);
          // SingleStep and Breakpoint pause the system; ExecuteOneInstruction does not.
          if (break_type >= ExecutionBreakType::SingleStep)
            System::PauseSystem(true);

          UpdateDebugDispatcherFlag();
          ExitExecution();
        }
      }
    } while (g_state.pending_ticks < g_state.downcount);

    TimingEvents::RunEvents();
  }
}
void CPU::ExecuteInterpreter()
2587
{
2588
if (g_state.using_debug_dispatcher)
2589
{
2590
if (g_settings.gpu_pgxp_enable)
2591
{
2592
if (g_settings.gpu_pgxp_cpu)
2593
ExecuteImpl<PGXPMode::CPU, true>();
2594
else
2595
ExecuteImpl<PGXPMode::Memory, true>();
2596
}
2597
else
2598
{
2599
ExecuteImpl<PGXPMode::Disabled, true>();
2600
}
2601
}
2602
else
2603
{
2604
if (g_settings.gpu_pgxp_enable)
2605
{
2606
if (g_settings.gpu_pgxp_cpu)
2607
ExecuteImpl<PGXPMode::CPU, false>();
2608
else
2609
ExecuteImpl<PGXPMode::Memory, false>();
2610
}
2611
else
2612
{
2613
ExecuteImpl<PGXPMode::Disabled, false>();
2614
}
2615
}
2616
}
2617
2618
// Returns the jump buffer used to unwind out of the dispatcher (see ExitExecution()),
// so external code (e.g. the recompiler) can exit execution the same way.
fastjmp_buf* CPU::GetExecutionJmpBuf()
{
  return &s_locals.exit_jmp_buf;
}
// Top-level CPU entry point: picks the execution backend and runs until something
// unwinds via ExitExecution().
void CPU::Execute()
{
  CheckForExecutionModeChange();

  // ExitExecution() longjmps back here with a non-zero value; returning resumes the caller.
  if (fastjmp_set(&s_locals.exit_jmp_buf) != 0)
    return;

  if (g_state.using_interpreter)
    ExecuteInterpreter();
  else
    CodeCache::Execute();
}
// Requests a single instruction step: the debug dispatcher will execute one instruction,
// pause the system, and exit. Interrupts execution if the dispatcher needs enabling.
void CPU::SetSingleStepFlag()
{
  s_locals.break_type = ExecutionBreakType::SingleStep;
  if (UpdateDebugDispatcherFlag())
    System::InterruptExecution();
}
// Interprets a pre-decoded code cache block: instructions come from the block's copy rather
// than being fetched, with the pipeline state primed accordingly. Stops early if an
// exception is raised (the handler has redirected control flow).
template<PGXPMode pgxp_mode>
void CPU::CodeCache::InterpretCachedBlock(const Block* block)
{
  // set up the state so we've already fetched the instruction
  DebugAssert(g_state.pc == block->pc);
  g_state.npc = block->pc + 4;
  g_state.exception_raised = false;

  const Instruction* instruction = block->Instructions();
  const Instruction* end_instruction = instruction + block->size;
  const CodeCache::InstructionInfo* info = block->InstructionsInfo();

  do
  {
    g_state.pending_ticks++;

    // now executing the instruction we previously fetched
    g_state.current_instruction.bits = instruction->bits;
    g_state.current_instruction_pc = g_state.pc;
    g_state.current_instruction_in_branch_delay_slot = info->is_branch_delay_slot; // TODO: let int set it instead
    g_state.current_instruction_was_branch_taken = g_state.branch_was_taken;
    g_state.branch_was_taken = false;

    // update pc
    g_state.pc = g_state.npc;
    g_state.npc += 4;

    // execute the instruction we previously fetched
    ExecuteInstruction<pgxp_mode, false>();

    // next load delay
    UpdateLoadDelay();

    // Abandon the rest of the block on exception; pc now points at the handler.
    if (g_state.exception_raised)
      break;

    instruction++;
    info++;
  } while (instruction != end_instruction);

  // cleanup so the interpreter can kick in if needed
  g_state.next_instruction_is_branch_delay_slot = false;
}
template void CPU::CodeCache::InterpretCachedBlock<PGXPMode::Disabled>(const Block* block);
2688
template void CPU::CodeCache::InterpretCachedBlock<PGXPMode::Memory>(const Block* block);
2689
template void CPU::CodeCache::InterpretCachedBlock<PGXPMode::CPU>(const Block* block);
2690
2691
// Interpreter fallback for code that can't be (or isn't) cached: fetches and executes
// instruction-by-instruction from the current pc until an exception, a block-exit
// instruction, or the end of a branch+delay-slot pair.
template<PGXPMode pgxp_mode>
void CPU::CodeCache::InterpretUncachedBlock()
{
  g_state.npc = g_state.pc;
  g_state.exception_raised = false;
  g_state.bus_error = false;
  if (!FetchInstructionForInterpreterFallback())
    return;

  // At this point, pc contains the last address executed (in the previous block). The instruction has not been fetched
  // yet. pc shouldn't be updated until the fetch occurs, that way the exception occurs in the delay slot.
  bool in_branch_delay_slot = false;
  for (;;)
  {
    g_state.pending_ticks++;

    // now executing the instruction we previously fetched
    g_state.current_instruction.bits = g_state.next_instruction.bits;
    g_state.current_instruction_pc = g_state.pc;
    g_state.current_instruction_in_branch_delay_slot = g_state.next_instruction_is_branch_delay_slot;
    g_state.current_instruction_was_branch_taken = g_state.branch_was_taken;
    g_state.next_instruction_is_branch_delay_slot = false;
    g_state.branch_was_taken = false;

    // Fetch the next instruction, except if we're in a branch delay slot. The "fetch" is done in the next block.
    const bool branch = IsBranchInstruction(g_state.current_instruction);
    if (!g_state.current_instruction_in_branch_delay_slot || branch)
    {
      if (!FetchInstructionForInterpreterFallback())
        break;
    }
    else
    {
      g_state.pc = g_state.npc;
    }

    // execute the instruction we previously fetched
    ExecuteInstruction<pgxp_mode, false>();

    // next load delay
    UpdateLoadDelay();

    // Stop after an exception, after completing a branch's delay slot, or on instructions
    // that must end a block.
    if (g_state.exception_raised || (!branch && in_branch_delay_slot) ||
        IsExitBlockInstruction(g_state.current_instruction))
    {
      break;
    }
    else if ((g_state.current_instruction.bits & 0xFFC0FFFFu) == 0x40806000u && HasPendingInterrupt())
    {
      // mtc0 rt, sr - Jackie Chan Stuntmaster, MTV Sports games.
      // Pain in the ass games trigger a software interrupt by writing to SR.Im.
      break;
    }

    in_branch_delay_slot = branch;
  }
}
template void CPU::CodeCache::InterpretUncachedBlock<PGXPMode::Disabled>();
2750
template void CPU::CodeCache::InterpretUncachedBlock<PGXPMode::Memory>();
2751
template void CPU::CodeCache::InterpretUncachedBlock<PGXPMode::CPU>();
2752
2753
// Recompiler fallback thunk: interprets a single instruction (no PGXP) and reports
// whether it raised an exception so the recompiled code can branch accordingly.
bool CPU::RecompilerThunks::InterpretInstruction()
{
  // Clear sticky flags before executing so the result reflects only this instruction.
  g_state.exception_raised = false;
  g_state.bus_error = false;
  ExecuteInstruction<PGXPMode::Disabled, false>();
  return g_state.exception_raised;
}
// Recompiler fallback thunk with PGXP memory-mode precision tracking; otherwise identical
// to InterpretInstruction().
bool CPU::RecompilerThunks::InterpretInstructionPGXP()
{
  // Clear sticky flags before executing so the result reflects only this instruction.
  g_state.exception_raised = false;
  g_state.bus_error = false;
  ExecuteInstruction<PGXPMode::Memory, false>();
  return g_state.exception_raised;
}
// Looks up the read handler for an address in the current handler table (which depends on
// SR.Isc/SR.Swc — see UpdateMemoryPointers()), indexed by the address's LUT page.
ALWAYS_INLINE_RELEASE Bus::MemoryReadHandler CPU::GetMemoryReadHandler(VirtualMemoryAddress address,
                                                                       MemoryAccessSize size)
{
  Bus::MemoryReadHandler* const handlers =
    Bus::OffsetHandlerArray<Bus::MemoryReadHandler>(g_state.memory_handlers, size, MemoryAccessType::Read);
  const u32 page = (address >> Bus::MEMORY_LUT_PAGE_SHIFT);
  return handlers[page];
}
// Looks up the write handler for an address in the current handler table (which depends on
// SR.Isc/SR.Swc — see UpdateMemoryPointers()), indexed by the address's LUT page.
ALWAYS_INLINE_RELEASE Bus::MemoryWriteHandler CPU::GetMemoryWriteHandler(VirtualMemoryAddress address,
                                                                         MemoryAccessSize size)
{
  Bus::MemoryWriteHandler* const handlers =
    Bus::OffsetHandlerArray<Bus::MemoryWriteHandler>(g_state.memory_handlers, size, MemoryAccessType::Write);
  const u32 page = (address >> Bus::MEMORY_LUT_PAGE_SHIFT);
  return handlers[page];
}
// Refreshes the cached memory handler table and fastmem base from the COP0 status register's
// cache-isolation/swap bits (SR.Isc/SR.Swc), which change how memory accesses behave.
void CPU::UpdateMemoryPointers()
{
  g_state.memory_handlers = Bus::GetMemoryHandlers(g_state.cop0_regs.sr.Isc, g_state.cop0_regs.sr.Swc);
  g_state.fastmem_base = Bus::GetFastmemBase(g_state.cop0_regs.sr.Isc);
}
// Reads word_count consecutive instruction words at address into data, from RAM, BIOS, or
// EXP1 (PIO). Template flags: add_ticks accrues access time into pending_ticks, icache_read
// selects the 1-tick-per-word cache-fill timing for RAM, raise_exceptions raises IBE for
// unmapped addresses. Returns false (with data zeroed) on failure.
template<bool add_ticks, bool icache_read, u32 word_count, bool raise_exceptions>
ALWAYS_INLINE_RELEASE bool CPU::DoInstructionRead(PhysicalMemoryAddress address, u32* data)
{
  using namespace Bus;

  // We can shortcut around VirtualAddressToPhysical() here because we're never going to be
  // calling with an out-of-range address.
  DebugAssert(VirtualAddressToPhysical(address) == (address & KSEG_MASK));
  address &= KSEG_MASK;

  if (address < RAM_MIRROR_END)
  {
    std::memcpy(data, &g_ram[address & g_ram_mask], sizeof(u32) * word_count);
    if constexpr (add_ticks)
      g_state.pending_ticks += (icache_read ? 1 : RAM_READ_TICKS) * word_count;

    return true;
  }
  else if (address >= BIOS_BASE && address < (BIOS_BASE + BIOS_SIZE))
  {
    std::memcpy(data, &g_bios[(address - BIOS_BASE) & BIOS_MASK], sizeof(u32) * word_count);
    if constexpr (add_ticks)
      g_state.pending_ticks += g_bios_access_time[static_cast<u32>(MemoryAccessSize::Word)] * word_count;

    return true;
  }
  else if (address >= EXP1_BASE && address < (EXP1_BASE + EXP1_SIZE))
  {
    // Expansion region 1 (e.g. parallel-port devices) goes through the PIO device handler.
    g_pio_device->CodeReadHandler(address & EXP1_MASK, data, word_count);
    if constexpr (add_ticks)
      g_state.pending_ticks += g_exp1_access_time[static_cast<u32>(MemoryAccessSize::Word)] * word_count;

    return true;
  }
  else [[unlikely]]
  {
    if (raise_exceptions)
    {
      g_state.cop0_regs.BadVaddr = address;
      RaiseException(Cop0Registers::CAUSE::MakeValueForException(Exception::IBE, false, false, 0), address);
    }

    std::memset(data, 0, sizeof(u32) * word_count);
    return false;
  }
}
// Returns the tick cost of fetching one instruction word from the given address
// (RAM vs BIOS), or 0 for regions with no modeled fetch cost.
TickCount CPU::GetInstructionReadTicks(VirtualMemoryAddress address)
{
  using namespace Bus;

  DebugAssert(VirtualAddressToPhysical(address) == (address & KSEG_MASK));
  const PhysicalMemoryAddress paddr = (address & KSEG_MASK);

  if (paddr < RAM_MIRROR_END)
    return RAM_READ_TICKS;

  if (paddr >= BIOS_BASE && paddr < (BIOS_BASE + BIOS_MIRROR_SIZE))
    return g_bios_access_time[static_cast<u32>(MemoryAccessSize::Word)];

  return 0;
}
// Returns the tick cost of filling the remainder of the icache line containing address:
// per-word cost (1 for RAM, BIOS access time for BIOS) times the words left in the line.
// Returns 0 for regions with no modeled fill cost.
TickCount CPU::GetICacheFillTicks(VirtualMemoryAddress address)
{
  using namespace Bus;

  DebugAssert(VirtualAddressToPhysical(address) == (address & KSEG_MASK));
  const PhysicalMemoryAddress paddr = (address & KSEG_MASK);

  // Words from this address to the end of its cache line.
  const u32 fill_words = ((ICACHE_LINE_SIZE - (paddr & (ICACHE_LINE_SIZE - 1))) / sizeof(u32));

  if (paddr < RAM_MIRROR_END)
    return static_cast<TickCount>(1 * fill_words);

  if (paddr >= BIOS_BASE && paddr < (BIOS_BASE + BIOS_MIRROR_SIZE))
    return g_bios_access_time[static_cast<u32>(MemoryAccessSize::Word)] * fill_words;

  return 0;
}
// Simulates icache timing for line_count lines starting at the current pc: every line whose
// tag doesn't match is (re)tagged and charged a fill cost. Used by the recompiler, which
// tracks tags but not cache contents.
void CPU::CheckAndUpdateICacheTags(u32 line_count)
{
  VirtualMemoryAddress tag_pc = g_state.pc & ICACHE_TAG_ADDRESS_MASK;

  // Fill cost is computed once from the first line's address.
  const TickCount fill_ticks_per_line = GetICacheFillTicks(tag_pc);
  TickCount total_ticks = 0;

  for (u32 i = 0; i < line_count; i++, tag_pc += ICACHE_LINE_SIZE)
  {
    const u32 line = GetICacheLine(tag_pc);
    if (g_state.icache_tags[line] == tag_pc)
      continue; // line already cached, no fill cost

    g_state.icache_tags[line] = tag_pc;
    total_ticks += fill_ticks_per_line;
  }

  g_state.pending_ticks += total_ticks;
}
// Fills the icache line containing address, starting at the word the fetch landed on and
// running to the end of the line (matching hardware partial-line fills). The low bits OR'd
// into the stored tag appear to mark the leading words that were NOT filled as invalid —
// NOTE(review): confirm against CompareICacheTag(). Returns the fetched word.
u32 CPU::FillICache(VirtualMemoryAddress address)
{
  const u32 line = GetICacheLine(address);
  const u32 line_word_offset = GetICacheLineWordOffset(address);
  u32* const line_data = g_state.icache_data.data() + (line * ICACHE_WORDS_PER_LINE);
  u32* const offset_line_data = line_data + line_word_offset;
  u32 line_tag;
  switch (line_word_offset)
  {
    case 0:
      // Full 4-word fill from the line start.
      DoInstructionRead<true, true, 4, false>(address & ~(ICACHE_LINE_SIZE - 1u), offset_line_data);
      line_tag = GetICacheTagForAddress(address);
      break;
    case 1:
      DoInstructionRead<true, true, 3, false>(address & (~(ICACHE_LINE_SIZE - 1u) | 0x4), offset_line_data);
      line_tag = GetICacheTagForAddress(address) | 0x1;
      break;
    case 2:
      DoInstructionRead<true, true, 2, false>(address & (~(ICACHE_LINE_SIZE - 1u) | 0x8), offset_line_data);
      line_tag = GetICacheTagForAddress(address) | 0x3;
      break;
    case 3:
    default:
      DoInstructionRead<true, true, 1, false>(address & (~(ICACHE_LINE_SIZE - 1u) | 0xC), offset_line_data);
      line_tag = GetICacheTagForAddress(address) | 0x7;
      break;
  }

  g_state.icache_tags[line] = line_tag;
  return offset_line_data[0];
}
// Invalidates the entire instruction cache: zeroes the data array and marks every
// line's tag invalid.
void CPU::ClearICache()
{
  std::memset(g_state.icache_data.data(), 0, ICACHE_SIZE);
  g_state.icache_tags.fill(ICACHE_INVALID_BITS);
}
namespace CPU {
2939
ALWAYS_INLINE_RELEASE static u32 ReadICache(VirtualMemoryAddress address)
2940
{
2941
const u32 line = GetICacheLine(address);
2942
const u32 line_word_offset = GetICacheLineWordOffset(address);
2943
const u32* const line_data = g_state.icache_data.data() + (line * ICACHE_WORDS_PER_LINE);
2944
return line_data[line_word_offset];
2945
}
2946
} // namespace CPU
2947
2948
// Fetches the instruction at npc into next_instruction: through the icache for cached
// segments (KUSEG low / KSEG0), directly from memory for KSEG1. On success advances
// pc to npc and npc past it. Raises IBE and returns false for unfetchable segments or
// failed uncached reads; the caller continues and refetches after the pipeline flush.
ALWAYS_INLINE_RELEASE bool CPU::FetchInstruction()
{
  DebugAssert(Common::IsAlignedPow2(g_state.npc, 4));

  const PhysicalMemoryAddress address = g_state.npc;
  switch (address >> 29)
  {
    case 0x00: // KUSEG 0M-512M
    case 0x04: // KSEG0 - physical memory cached
    {
#if 0
      DoInstructionRead<true, false, 1, false>(address, &g_state.next_instruction.bits);
#else
      // Cache hit reads the stored word; miss fills the rest of the line and returns it.
      if (CompareICacheTag(address))
        g_state.next_instruction.bits = ReadICache(address);
      else
        g_state.next_instruction.bits = FillICache(address);
#endif
    }
    break;

    case 0x05: // KSEG1 - physical memory uncached
    {
      if (!DoInstructionRead<true, false, 1, true>(address, &g_state.next_instruction.bits))
        return false;
    }
    break;

    case 0x01: // KUSEG 512M-1024M
    case 0x02: // KUSEG 1024M-1536M
    case 0x03: // KUSEG 1536M-2048M
    case 0x06: // KSEG2
    case 0x07: // KSEG2
    default:
    {
      // Not executable address space: bus error on fetch.
      CPU::RaiseException(Cop0Registers::CAUSE::MakeValueForException(Exception::IBE, false, false, 0), address);
      return false;
    }
  }

  g_state.pc = g_state.npc;
  g_state.npc += sizeof(g_state.next_instruction.bits);
  return true;
}
// Instruction fetch used by the uncached interpreter fallback. Unlike FetchInstruction()
// this validates alignment (AdEL) and bypasses the icache, which may be stale relative to
// recompiled execution. Advances pc/npc on success; raises AdEL/IBE and returns false on failure.
bool CPU::FetchInstructionForInterpreterFallback()
{
  if (!Common::IsAlignedPow2(g_state.npc, 4)) [[unlikely]]
  {
    // The BadVaddr and EPC must be set to the fetching address, not the instruction about to execute.
    g_state.cop0_regs.BadVaddr = g_state.npc;
    RaiseException(Cop0Registers::CAUSE::MakeValueForException(Exception::AdEL, false, false, 0), g_state.npc);
    return false;
  }

  const PhysicalMemoryAddress address = g_state.npc;
  switch (address >> 29)
  {
    case 0x00: // KUSEG 0M-512M
    case 0x04: // KSEG0 - physical memory cached
    case 0x05: // KSEG1 - physical memory uncached
    {
      // We don't use the icache when doing interpreter fallbacks, because it's probably stale.
      if (!DoInstructionRead<false, false, 1, true>(address, &g_state.next_instruction.bits)) [[unlikely]]
        return false;
    }
    break;

    case 0x01: // KUSEG 512M-1024M
    case 0x02: // KUSEG 1024M-1536M
    case 0x03: // KUSEG 1536M-2048M
    case 0x06: // KSEG2
    case 0x07: // KSEG2
    default:
    {
      // Not executable address space: bus error on fetch, preserving delay-slot context.
      CPU::RaiseException(Cop0Registers::CAUSE::MakeValueForException(Exception::IBE,
                                                                      g_state.current_instruction_in_branch_delay_slot,
                                                                      g_state.current_instruction_was_branch_taken, 0),
                          address);
      return false;
    }
  }

  g_state.pc = g_state.npc;
  g_state.npc += sizeof(g_state.next_instruction.bits);
  return true;
}
// Side-effect-free instruction read used by the debugger and pipeline setup: no ticks,
// no exceptions. Only the physically-mapped segments (KUSEG low, KSEG0, KSEG1) are
// readable; anything else returns false.
bool CPU::SafeReadInstruction(VirtualMemoryAddress addr, u32* value)
{
  const u32 segment = (addr >> 29);
  if (segment != 0x00 && segment != 0x04 && segment != 0x05)
    return false;

  // TODO: Check icache.
  return DoInstructionRead<false, false, 1, false>(addr, value);
}
// Side-effect-free ("safe") memory access used by the debugger/utility paths: touches only
// scratchpad, RAM, and (reads only) BIOS, with no tick accounting and no exceptions.
// Writes to RAM still invalidate code-cache blocks on pages whose contents change.
// Returns false for unmapped segments and regions this path doesn't handle.
template<MemoryAccessType type, MemoryAccessSize size>
ALWAYS_INLINE bool CPU::DoSafeMemoryAccess(VirtualMemoryAddress address, u32& value)
{
  using namespace Bus;

  switch (address >> 29)
  {
    case 0x00: // KUSEG 0M-512M
    case 0x04: // KSEG0 - physical memory cached
    {
      // Scratchpad (D-cache used as fast RAM) is only mapped in the cached segments.
      if ((address & SCRATCHPAD_ADDR_MASK) == SCRATCHPAD_ADDR)
      {
        const u32 offset = address & SCRATCHPAD_OFFSET_MASK;

        if constexpr (type == MemoryAccessType::Read)
        {
          if constexpr (size == MemoryAccessSize::Byte)
          {
            value = CPU::g_state.scratchpad[offset];
          }
          else if constexpr (size == MemoryAccessSize::HalfWord)
          {
            u16 temp;
            std::memcpy(&temp, &CPU::g_state.scratchpad[offset], sizeof(u16));
            value = ZeroExtend32(temp);
          }
          else if constexpr (size == MemoryAccessSize::Word)
          {
            std::memcpy(&value, &CPU::g_state.scratchpad[offset], sizeof(u32));
          }
        }
        else
        {
          if constexpr (size == MemoryAccessSize::Byte)
          {
            CPU::g_state.scratchpad[offset] = Truncate8(value);
          }
          else if constexpr (size == MemoryAccessSize::HalfWord)
          {
            std::memcpy(&CPU::g_state.scratchpad[offset], &value, sizeof(u16));
          }
          else if constexpr (size == MemoryAccessSize::Word)
          {
            std::memcpy(&CPU::g_state.scratchpad[offset], &value, sizeof(u32));
          }
        }

        return true;
      }

      address &= KSEG_MASK;
    }
    break;

    case 0x01: // KUSEG 512M-1024M
    case 0x02: // KUSEG 1024M-1536M
    case 0x03: // KUSEG 1536M-2048M
    case 0x06: // KSEG2
    case 0x07: // KSEG2
    {
      // Above 512mb raises an exception.
      return false;
    }

    case 0x05: // KSEG1 - physical memory uncached
    {
      address &= KSEG_MASK;
    }
    break;
  }

  if (address < RAM_MIRROR_END)
  {
    const u32 offset = address & g_ram_mask;
    if constexpr (type == MemoryAccessType::Read)
    {
      if constexpr (size == MemoryAccessSize::Byte)
      {
        value = g_unprotected_ram[offset];
      }
      else if constexpr (size == MemoryAccessSize::HalfWord)
      {
        u16 temp;
        std::memcpy(&temp, &g_unprotected_ram[offset], sizeof(temp));
        value = ZeroExtend32(temp);
      }
      else if constexpr (size == MemoryAccessSize::Word)
      {
        std::memcpy(&value, &g_unprotected_ram[offset], sizeof(u32));
      }
    }
    else
    {
      const u32 page_index = offset >> HOST_PAGE_SHIFT;

      // Writes compare against the old value first: only actual changes dirty the page,
      // avoiding needless code-cache invalidation.
      if constexpr (size == MemoryAccessSize::Byte)
      {
        if (g_unprotected_ram[offset] != Truncate8(value))
        {
          g_unprotected_ram[offset] = Truncate8(value);
          if (g_ram_code_bits[page_index])
            CPU::CodeCache::InvalidateBlocksWithPageIndex(page_index);
        }
      }
      else if constexpr (size == MemoryAccessSize::HalfWord)
      {
        const u16 new_value = Truncate16(value);
        u16 old_value;
        std::memcpy(&old_value, &g_unprotected_ram[offset], sizeof(old_value));
        if (old_value != new_value)
        {
          std::memcpy(&g_unprotected_ram[offset], &new_value, sizeof(u16));
          if (g_ram_code_bits[page_index])
            CPU::CodeCache::InvalidateBlocksWithPageIndex(page_index);
        }
      }
      else if constexpr (size == MemoryAccessSize::Word)
      {
        u32 old_value;
        std::memcpy(&old_value, &g_unprotected_ram[offset], sizeof(u32));
        if (old_value != value)
        {
          std::memcpy(&g_unprotected_ram[offset], &value, sizeof(u32));
          if (g_ram_code_bits[page_index])
            CPU::CodeCache::InvalidateBlocksWithPageIndex(page_index);
        }
      }
    }

    return true;
  }
  if constexpr (type == MemoryAccessType::Read)
  {
    // BIOS ROM is read-only; writes fall through to the failure return.
    if (address >= BIOS_BASE && address < (BIOS_BASE + BIOS_SIZE))
    {
      const u32 offset = (address & BIOS_MASK);
      if constexpr (size == MemoryAccessSize::Byte)
      {
        value = ZeroExtend32(g_bios[offset]);
      }
      else if constexpr (size == MemoryAccessSize::HalfWord)
      {
        u16 halfword;
        std::memcpy(&halfword, &g_bios[offset], sizeof(u16));
        value = ZeroExtend32(halfword);
      }
      else
      {
        std::memcpy(&value, &g_bios[offset], sizeof(u32));
      }

      return true;
    }
  }
  return false;
}
bool CPU::SafeReadMemoryByte(VirtualMemoryAddress addr, u8* value)
{
  // Widen to u32 for the shared safe-access helper, then narrow the result.
  // On failure *value is left untouched.
  u32 word = 0;
  const bool ok = DoSafeMemoryAccess<MemoryAccessType::Read, MemoryAccessSize::Byte>(addr, word);
  if (ok)
    *value = Truncate8(word);
  return ok;
}
3226
3227
bool CPU::SafeReadMemoryHalfWord(VirtualMemoryAddress addr, u16* value)
{
  // Misaligned halfwords are assembled from two byte reads (little-endian).
  if ((addr & 1) != 0)
  {
    u8 lo, hi;
    if (!SafeReadMemoryByte(addr, &lo) || !SafeReadMemoryByte(addr + 1, &hi))
      return false;

    *value = static_cast<u16>((ZeroExtend16(hi) << 8) | ZeroExtend16(lo));
    return true;
  }

  // Aligned: single access through the shared helper.
  u32 word = 0;
  if (!DoSafeMemoryAccess<MemoryAccessType::Read, MemoryAccessSize::HalfWord>(addr, word))
    return false;

  *value = Truncate16(word);
  return true;
}
3246
3247
bool CPU::SafeReadMemoryWord(VirtualMemoryAddress addr, u32* value)
{
  // Misaligned words are assembled from two halfword reads (little-endian).
  if ((addr & 3) != 0)
  {
    u16 lo, hi;
    if (!SafeReadMemoryHalfWord(addr, &lo) || !SafeReadMemoryHalfWord(addr + 2, &hi))
      return false;

    *value = (ZeroExtend32(hi) << 16) | ZeroExtend32(lo);
    return true;
  }

  // Aligned: single access through the shared helper.
  return DoSafeMemoryAccess<MemoryAccessType::Read, MemoryAccessSize::Word>(addr, *value);
}
3259
3260
bool CPU::SafeReadMemoryCString(VirtualMemoryAddress addr, SmallStringBase* value, u32 max_length /*= 1024*/)
{
  // Reads bytes until a NUL terminator or max_length characters are collected.
  // If any byte is unreadable, the partial string is discarded and false is
  // returned.
  value->clear();

  for (;;)
  {
    u8 ch;
    if (!SafeReadMemoryByte(addr, &ch))
    {
      // Unreadable memory: throw away whatever was accumulated.
      value->clear();
      return false;
    }

    if (ch == 0)
      return true;

    value->push_back(ch);
    if (value->length() >= max_length)
      return true;

    addr++;
  }
}
3280
3281
bool CPU::SafeWriteMemoryByte(VirtualMemoryAddress addr, u8 value)
{
  // Widen to u32 for the shared safe-access helper.
  u32 word = ZeroExtend32(value);
  return DoSafeMemoryAccess<MemoryAccessType::Write, MemoryAccessSize::Byte>(addr, word);
}
3286
3287
bool CPU::SafeWriteMemoryHalfWord(VirtualMemoryAddress addr, u16 value)
{
  // Misaligned halfwords degrade to two byte writes (little-endian order).
  if ((addr & 1) != 0)
    return SafeWriteMemoryByte(addr, Truncate8(value)) && SafeWriteMemoryByte(addr + 1, Truncate8(value >> 8));

  // Aligned: single access through the shared helper.
  u32 word = ZeroExtend32(value);
  return DoSafeMemoryAccess<MemoryAccessType::Write, MemoryAccessSize::HalfWord>(addr, word);
}
3297
3298
bool CPU::SafeWriteMemoryWord(VirtualMemoryAddress addr, u32 value)
{
  // Misaligned words degrade to two halfword writes (little-endian order).
  if ((addr & 3) != 0)
    return SafeWriteMemoryHalfWord(addr, Truncate16(value)) && SafeWriteMemoryHalfWord(addr + 2, Truncate16(value >> 16));

  // Aligned: single access through the shared helper.
  return DoSafeMemoryAccess<MemoryAccessType::Write, MemoryAccessSize::Word>(addr, value);
}
3305
3306
// Bulk-reads `length` bytes of guest memory starting at `addr` into `data`.
// Takes a single-memcpy fast path when the whole range lies within mirrored
// RAM in KUSEG/KSEG0/KSEG1; otherwise falls back to per-byte safe reads (which
// can also reach scratchpad/BIOS, and fail on unmapped addresses).
bool CPU::SafeReadMemoryBytes(VirtualMemoryAddress addr, void* data, u32 length)
{
  using namespace Bus;

  // seg is the top 3 address bits: 0 = first 512MB of KUSEG, 4 = KSEG0,
  // 5 = KSEG1. The second test rejects ranges extending past the RAM mirrors,
  // the third rejects ranges that would wrap past the installed RAM size.
  const u32 seg = (addr >> 29);
  if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & KSEG_MASK) >= RAM_MIRROR_END) ||
      (((addr & g_ram_mask) + length) > g_ram_size))
  {
    // Slow path: byte-at-a-time through the safe access helper; aborts on the
    // first unreadable byte.
    u8* ptr = static_cast<u8*>(data);
    u8* const ptr_end = ptr + length;
    while (ptr != ptr_end)
    {
      if (!SafeReadMemoryByte(addr++, ptr++))
        return false;
    }

    return true;
  }

  // Fast path: all in RAM, no wraparound.
  std::memcpy(data, &g_ram[addr & g_ram_mask], length);
  return true;
}
3329
3330
// Bulk-writes `length` bytes from `data` into guest memory at `addr`.
// Mirrors SafeReadMemoryBytes: one memcpy when the range is entirely RAM in
// KUSEG/KSEG0/KSEG1, per-byte safe writes otherwise. NOTE(review): the fast
// path stores directly into g_ram and does not perform the code-cache
// invalidation the per-byte path does — presumably callers invalidate
// separately when writing code regions; confirm before relying on this.
bool CPU::SafeWriteMemoryBytes(VirtualMemoryAddress addr, const void* data, u32 length)
{
  using namespace Bus;

  // Same RAM fast-path guard as SafeReadMemoryBytes (segment, mirror bound,
  // and RAM-size wraparound checks).
  const u32 seg = (addr >> 29);
  if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & KSEG_MASK) >= RAM_MIRROR_END) ||
      (((addr & g_ram_mask) + length) > g_ram_size))
  {
    // Slow path: byte-at-a-time; aborts on the first unwritable byte.
    const u8* ptr = static_cast<const u8*>(data);
    const u8* const ptr_end = ptr + length;
    while (ptr != ptr_end)
    {
      if (!SafeWriteMemoryByte(addr++, *(ptr++)))
        return false;
    }

    return true;
  }

  // Fast path: all in RAM, no wraparound.
  std::memcpy(&g_ram[addr & g_ram_mask], data, length);
  return true;
}
3353
3354
bool CPU::SafeWriteMemoryBytes(VirtualMemoryAddress addr, const std::span<const u8> data)
{
  // Convenience overload: forward to the pointer/length variant.
  const u32 length = static_cast<u32>(data.size());
  return SafeWriteMemoryBytes(addr, data.data(), length);
}
3358
3359
// Writes `length` zero bytes to guest memory starting at `addr`.
// Uses a single memset when the whole range is RAM in KUSEG/KSEG0/KSEG1;
// otherwise zeroes through the safe write helpers — leading bytes up to word
// alignment, then whole words, then trailing bytes — failing on the first
// unwritable address.
bool CPU::SafeZeroMemoryBytes(VirtualMemoryAddress addr, u32 length)
{
  using namespace Bus;

  // Same RAM fast-path guard as SafeReadMemoryBytes/SafeWriteMemoryBytes.
  const u32 seg = (addr >> 29);
  if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & KSEG_MASK) >= RAM_MIRROR_END) ||
      (((addr & g_ram_mask) + length) > g_ram_size))
  {
    // Zero leading bytes until addr is word-aligned.
    while ((addr & 3u) != 0 && length > 0)
    {
      if (!CPU::SafeWriteMemoryByte(addr, 0)) [[unlikely]]
        return false;

      addr++;
      length--;
    }
    // Zero whole aligned words.
    while (length >= 4)
    {
      if (!CPU::SafeWriteMemoryWord(addr, 0)) [[unlikely]]
        return false;

      addr += 4;
      length -= 4;
    }
    // Zero any trailing bytes.
    while (length > 0)
    {
      if (!CPU::SafeWriteMemoryByte(addr, 0)) [[unlikely]]
        return false;

      addr++;
      length--;
    }

    return true;
  }

  // Fast path: all in RAM, no wraparound.
  std::memset(&g_ram[addr & g_ram_mask], 0, length);
  return true;
}
3399
3400
void* CPU::GetDirectReadMemoryPointer(VirtualMemoryAddress address, MemoryAccessSize size, TickCount* read_ticks)
{
  using namespace Bus;

  // Only KUSEG/KSEG0/KSEG1 addresses can be backed by a host pointer.
  const u32 seg = (address >> 29);
  if (seg != 0 && seg != 4 && seg != 5)
    return nullptr;

  // Resolve the region in the same order as the original checks: RAM
  // (including mirrors), then scratchpad, then BIOS ROM.
  const PhysicalMemoryAddress paddr = VirtualAddressToPhysical(address);
  void* ptr = nullptr;
  TickCount ticks = 0;

  if (paddr < RAM_MIRROR_END)
  {
    ptr = &g_ram[paddr & g_ram_mask];
    ticks = RAM_READ_TICKS;
  }
  else if ((paddr & SCRATCHPAD_ADDR_MASK) == SCRATCHPAD_ADDR)
  {
    // Scratchpad reads incur no additional access latency.
    ptr = &g_state.scratchpad[paddr & SCRATCHPAD_OFFSET_MASK];
    ticks = 0;
  }
  else if (paddr >= BIOS_BASE && paddr < (BIOS_BASE + BIOS_SIZE))
  {
    // BIOS access time depends on the access size.
    ptr = &g_bios[paddr & BIOS_MASK];
    ticks = g_bios_access_time[static_cast<u32>(size)];
  }

  // read_ticks is only written when a direct pointer was found, matching the
  // early-return form.
  if (ptr && read_ticks)
    *read_ticks = ticks;

  return ptr;
}
3435
3436
void* CPU::GetDirectWriteMemoryPointer(VirtualMemoryAddress address, MemoryAccessSize size)
{
  using namespace Bus;

  // Writable host pointers exist only for RAM and scratchpad, and only in
  // KUSEG/KSEG0/KSEG1.
  const u32 seg = (address >> 29);
  if (seg == 0 || seg == 4 || seg == 5)
  {
    const PhysicalMemoryAddress paddr = address & KSEG_MASK;

    if (paddr < RAM_MIRROR_END)
      return &g_ram[paddr & g_ram_mask];

    if ((paddr & SCRATCHPAD_ADDR_MASK) == SCRATCHPAD_ADDR)
      return &g_state.scratchpad[paddr & SCRATCHPAD_OFFSET_MASK];
  }

  return nullptr;
}
3454
3455
template<MemoryAccessType type, MemoryAccessSize size>
3456
ALWAYS_INLINE_RELEASE bool CPU::DoAlignmentCheck(VirtualMemoryAddress address)
3457
{
3458
if constexpr (size == MemoryAccessSize::HalfWord)
3459
{
3460
if (Common::IsAlignedPow2(address, 2))
3461
return true;
3462
}
3463
else if constexpr (size == MemoryAccessSize::Word)
3464
{
3465
if (Common::IsAlignedPow2(address, 4))
3466
return true;
3467
}
3468
else
3469
{
3470
return true;
3471
}
3472
3473
g_state.cop0_regs.BadVaddr = address;
3474
RaiseException(type == MemoryAccessType::Read ? Exception::AdEL : Exception::AdES);
3475
return false;
3476
}
3477
3478
ALWAYS_INLINE_RELEASE void CPU::RaiseDataBusException()
{
  // Build the CAUSE value for a data bus error (DBE), preserving the current
  // instruction's branch-delay-slot/branch-taken state, and raise it at the
  // faulting instruction's PC.
  const u32 cause = Cop0Registers::CAUSE::MakeValueForException(
    Exception::DBE, g_state.current_instruction_in_branch_delay_slot, g_state.current_instruction_was_branch_taken,
    0);
  RaiseException(cause, g_state.current_instruction_pc);
}
3485
3486
// Debug-only memory-access tracing, compiled out by default (#if 0). When the
// outer condition is enabled, MEMORY_BREAKPOINT expands to MemoryBreakpoint(),
// which is called on every CPU memory access and can be edited to break on a
// specific cycle count, address, or value, or to log accesses to a file.
#if 0
static void MemoryBreakpoint(MemoryAccessType type, MemoryAccessSize size, VirtualMemoryAddress addr, u32 value)
{
  static constexpr const char* sizes[3] = { "byte", "halfword", "word" };
  static constexpr const char* types[2] = { "read", "write" };

  // Global cycle count at the time of the access.
  const u32 cycle = TimingEvents::GetGlobalTickCounter() + CPU::g_state.pending_ticks;
  if (cycle == 3301006373)
    __debugbreak();

#if 0
  // Optional: log every access to a file.
  static std::FILE* fp = nullptr;
  if (!fp)
    fp = std::fopen("D:\\memory.txt", "wb");
  if (fp)
  {
    std::fprintf(fp, "%u %s %s %08X %08X\n", cycle, types[static_cast<u32>(type)], sizes[static_cast<u32>(size)], addr, value);
    std::fflush(fp);
  }
#endif

#if 0
  // Optional: break on reads of a specific address.
  if (type == MemoryAccessType::Read && addr == 0x1F000084)
    __debugbreak();
#endif
#if 0
  // Optional: break on writes to a specific address (and optionally value).
  if (type == MemoryAccessType::Write && addr == 0x000000B0 /*&& value == 0x3C080000*/)
    __debugbreak();
#endif

#if 0 // TODO: MEMBP
  if (type == MemoryAccessType::Write && address == 0x80113028)
  {
    if ((TimingEvents::GetGlobalTickCounter() + CPU::g_state.pending_ticks) == 5051485)
      __debugbreak();

    Log_WarningPrintf("VAL %08X @ %u", value, (TimingEvents::GetGlobalTickCounter() + CPU::g_state.pending_ticks));
  }
#endif
}
#define MEMORY_BREAKPOINT(type, size, addr, value) MemoryBreakpoint((type), (size), (addr), (value))
#else
// Tracing disabled: the macro expands to nothing.
#define MEMORY_BREAKPOINT(type, size, addr, value)
#endif
3530
3531
bool CPU::ReadMemoryByte(VirtualMemoryAddress addr, u8* value)
{
  // Dispatch through the byte read handler; bus errors are latched in g_state.
  const u32 raw = GetMemoryReadHandler(addr, MemoryAccessSize::Byte)(addr);
  *value = Truncate8(raw);
  if (!g_state.bus_error) [[likely]]
  {
    MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::Byte, addr, *value);
    return true;
  }

  // Clear the latched error and raise a data bus exception (DBE).
  g_state.bus_error = false;
  RaiseDataBusException();
  return false;
}
3544
3545
bool CPU::ReadMemoryHalfWord(VirtualMemoryAddress addr, u16* value)
{
  // Misaligned halfword reads raise AdEL inside the alignment check.
  if (!DoAlignmentCheck<MemoryAccessType::Read, MemoryAccessSize::HalfWord>(addr))
    return false;

  const u32 raw = GetMemoryReadHandler(addr, MemoryAccessSize::HalfWord)(addr);
  *value = Truncate16(raw);
  if (!g_state.bus_error) [[likely]]
  {
    MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::HalfWord, addr, *value);
    return true;
  }

  // Clear the latched error and raise a data bus exception (DBE).
  g_state.bus_error = false;
  RaiseDataBusException();
  return false;
}
3561
3562
bool CPU::ReadMemoryWord(VirtualMemoryAddress addr, u32* value)
{
  // Misaligned word reads raise AdEL inside the alignment check.
  if (!DoAlignmentCheck<MemoryAccessType::Read, MemoryAccessSize::Word>(addr))
    return false;

  *value = GetMemoryReadHandler(addr, MemoryAccessSize::Word)(addr);
  if (!g_state.bus_error) [[likely]]
  {
    MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::Word, addr, *value);
    return true;
  }

  // Clear the latched error and raise a data bus exception (DBE).
  g_state.bus_error = false;
  RaiseDataBusException();
  return false;
}
3578
3579
bool CPU::WriteMemoryByte(VirtualMemoryAddress addr, u32 value)
{
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::Byte, addr, value);

  // Byte stores cannot be misaligned; dispatch straight to the handler.
  GetMemoryWriteHandler(addr, MemoryAccessSize::Byte)(addr, value);
  if (!g_state.bus_error) [[likely]]
    return true;

  // Clear the latched error and raise a data bus exception (DBE).
  g_state.bus_error = false;
  RaiseDataBusException();
  return false;
}
3593
3594
bool CPU::WriteMemoryHalfWord(VirtualMemoryAddress addr, u32 value)
{
  // Breakpoint hook fires before the alignment check, as in the other write
  // paths.
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::HalfWord, addr, value);

  // Misaligned halfword stores raise AdES inside the alignment check.
  if (!DoAlignmentCheck<MemoryAccessType::Write, MemoryAccessSize::HalfWord>(addr))
    return false;

  GetMemoryWriteHandler(addr, MemoryAccessSize::HalfWord)(addr, value);
  if (!g_state.bus_error) [[likely]]
    return true;

  // Clear the latched error and raise a data bus exception (DBE).
  g_state.bus_error = false;
  RaiseDataBusException();
  return false;
}
3611
3612
bool CPU::WriteMemoryWord(VirtualMemoryAddress addr, u32 value)
{
  // Breakpoint hook fires before the alignment check, as in the other write
  // paths.
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::Word, addr, value);

  // Misaligned word stores raise AdES inside the alignment check.
  if (!DoAlignmentCheck<MemoryAccessType::Write, MemoryAccessSize::Word>(addr))
    return false;

  GetMemoryWriteHandler(addr, MemoryAccessSize::Word)(addr, value);
  if (!g_state.bus_error) [[likely]]
    return true;

  // Clear the latched error and raise a data bus exception (DBE).
  g_state.bus_error = false;
  RaiseDataBusException();
  return false;
}
3629
3630
u64 CPU::RecompilerThunks::ReadMemoryByte(u32 address)
{
  // Returns the value zero-extended to 64 bits, or the negated exception code
  // (as u64) on bus error.
  const u32 result = GetMemoryReadHandler(address, MemoryAccessSize::Byte)(address);
  if (!g_state.bus_error) [[likely]]
  {
    MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::Byte, address, result);
    return ZeroExtend64(result);
  }

  g_state.bus_error = false;
  return static_cast<u64>(-static_cast<s64>(Exception::DBE));
}
3642
3643
u64 CPU::RecompilerThunks::ReadMemoryHalfWord(u32 address)
{
  // Misaligned: latch BadVaddr and return the negated AdEL exception code.
  if (!Common::IsAlignedPow2(address, 2)) [[unlikely]]
  {
    g_state.cop0_regs.BadVaddr = address;
    return static_cast<u64>(-static_cast<s64>(Exception::AdEL));
  }

  const u32 result = GetMemoryReadHandler(address, MemoryAccessSize::HalfWord)(address);
  if (!g_state.bus_error) [[likely]]
  {
    MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::HalfWord, address, result);
    return ZeroExtend64(result);
  }

  g_state.bus_error = false;
  return static_cast<u64>(-static_cast<s64>(Exception::DBE));
}
3661
3662
u64 CPU::RecompilerThunks::ReadMemoryWord(u32 address)
{
  // Misaligned: latch BadVaddr and return the negated AdEL exception code.
  if (!Common::IsAlignedPow2(address, 4)) [[unlikely]]
  {
    g_state.cop0_regs.BadVaddr = address;
    return static_cast<u64>(-static_cast<s64>(Exception::AdEL));
  }

  const u32 result = GetMemoryReadHandler(address, MemoryAccessSize::Word)(address);
  if (!g_state.bus_error) [[likely]]
  {
    MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::Word, address, result);
    return ZeroExtend64(result);
  }

  g_state.bus_error = false;
  return static_cast<u64>(-static_cast<s64>(Exception::DBE));
}
3680
3681
u32 CPU::RecompilerThunks::WriteMemoryByte(u32 address, u32 value)
{
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::Byte, address, value);

  // Returns 0 on success, or the exception code on bus error.
  GetMemoryWriteHandler(address, MemoryAccessSize::Byte)(address, value);
  if (!g_state.bus_error) [[likely]]
    return 0;

  g_state.bus_error = false;
  return static_cast<u32>(Exception::DBE);
}
3694
3695
u32 CPU::RecompilerThunks::WriteMemoryHalfWord(u32 address, u32 value)
{
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::HalfWord, address, value);

  // Misaligned: latch BadVaddr and return the AdES exception code.
  if (!Common::IsAlignedPow2(address, 2)) [[unlikely]]
  {
    g_state.cop0_regs.BadVaddr = address;
    return static_cast<u32>(Exception::AdES);
  }

  // Returns 0 on success, or the exception code on bus error.
  GetMemoryWriteHandler(address, MemoryAccessSize::HalfWord)(address, value);
  if (!g_state.bus_error) [[likely]]
    return 0;

  g_state.bus_error = false;
  return static_cast<u32>(Exception::DBE);
}
3714
3715
u32 CPU::RecompilerThunks::WriteMemoryWord(u32 address, u32 value)
{
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::Word, address, value);

  // Misaligned: latch BadVaddr and return the AdES exception code.
  if (!Common::IsAlignedPow2(address, 4)) [[unlikely]]
  {
    g_state.cop0_regs.BadVaddr = address;
    return static_cast<u32>(Exception::AdES);
  }

  // Returns 0 on success, or the exception code on bus error.
  GetMemoryWriteHandler(address, MemoryAccessSize::Word)(address, value);
  if (!g_state.bus_error) [[likely]]
    return 0;

  g_state.bus_error = false;
  return static_cast<u32>(Exception::DBE);
}
3734
3735
u32 CPU::RecompilerThunks::UncheckedReadMemoryByte(u32 address)
{
  // No alignment or bus-error handling; the generated code guarantees safety.
  const u32 result = GetMemoryReadHandler(address, MemoryAccessSize::Byte)(address);
  MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::Byte, address, result);
  return result;
}
3741
3742
u32 CPU::RecompilerThunks::UncheckedReadMemoryHalfWord(u32 address)
{
  // No alignment or bus-error handling; the generated code guarantees safety.
  const u32 result = GetMemoryReadHandler(address, MemoryAccessSize::HalfWord)(address);
  MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::HalfWord, address, result);
  return result;
}
3748
3749
u32 CPU::RecompilerThunks::UncheckedReadMemoryWord(u32 address)
{
  // No alignment or bus-error handling; the generated code guarantees safety.
  const u32 result = GetMemoryReadHandler(address, MemoryAccessSize::Word)(address);
  MEMORY_BREAKPOINT(MemoryAccessType::Read, MemoryAccessSize::Word, address, result);
  return result;
}
3755
3756
void CPU::RecompilerThunks::UncheckedWriteMemoryByte(u32 address, u32 value)
{
  // No alignment or bus-error handling; the generated code guarantees safety.
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::Byte, address, value);
  GetMemoryWriteHandler(address, MemoryAccessSize::Byte)(address, value);
}
3761
3762
void CPU::RecompilerThunks::UncheckedWriteMemoryHalfWord(u32 address, u32 value)
{
  // No alignment or bus-error handling; the generated code guarantees safety.
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::HalfWord, address, value);
  GetMemoryWriteHandler(address, MemoryAccessSize::HalfWord)(address, value);
}
3767
3768
void CPU::RecompilerThunks::UncheckedWriteMemoryWord(u32 address, u32 value)
{
  // No alignment or bus-error handling; the generated code guarantees safety.
  MEMORY_BREAKPOINT(MemoryAccessType::Write, MemoryAccessSize::Word, address, value);
  GetMemoryWriteHandler(address, MemoryAccessSize::Word)(address, value);
}
3773
3774
#undef MEMORY_BREAKPOINT
3775
3776