GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/ARM64/Arm64IRCompSystem.cpp
// Copyright (c) 2023- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
// In other words, PPSSPP_ARCH(ARM64) || DISASM_ALL.
#if PPSSPP_ARCH(ARM64) || (PPSSPP_PLATFORM(WINDOWS) && !defined(__LIBRETRO__))

#include "Common/Profiler/Profiler.h"
#include "Core/Core.h"
#include "Core/Debugger/Breakpoints.h"
#include "Core/HLE/HLE.h"
#include "Core/HLE/ReplaceTables.h"
#include "Core/MemMap.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/IR/IRInterpreter.h"
#include "Core/MIPS/ARM64/Arm64IRJit.h"
#include "Core/MIPS/ARM64/Arm64IRRegCache.h"

// This file contains compilation for basic PC/downcount accounting, syscalls, debug funcs, etc.
//
// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non-working ones should have DISABLE. No flags because that's in IR already.

// #define CONDITIONAL_DISABLE { CompIR_Generic(inst); return; }
#define CONDITIONAL_DISABLE {}
#define DISABLE { CompIR_Generic(inst); return; }
#define INVALIDOP { _assert_msg_(false, "Invalid IR inst %d", (int)inst.op); CompIR_Generic(inst); return; }

namespace MIPSComp {

using namespace Arm64Gen;
using namespace Arm64IRJitConstants;

void Arm64JitBackend::CompIR_Basic(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Downcount:
		SUBI2R(DOWNCOUNTREG, DOWNCOUNTREG, (s64)(s32)inst.constant, SCRATCH1);
		break;

	case IROp::SetConst:
		regs_.SetGPRImm(inst.dest, inst.constant);
		break;

	case IROp::SetConstF:
	{
		regs_.Map(inst);
		float f;
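		// Reinterpret the raw constant bits as a float; memcpy avoids strict-aliasing issues.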
		memcpy(&f, &inst.constant, sizeof(f));
		fp_.MOVI2F(regs_.F(inst.dest), f, SCRATCH1);
		break;
	}

	case IROp::SetPC:
		regs_.Map(inst);
		MovToPC(regs_.R(inst.src1));
		break;

	case IROp::SetPCConst:
		lastConstPC_ = inst.constant;
		MOVI2R(SCRATCH1, inst.constant);
		MovToPC(SCRATCH1);
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_Breakpoint(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Breakpoint:
	{
		FlushAll();
		// Note: the constant could be a delay slot.
		MOVI2R(W0, inst.constant);
		QuickCallFunction(SCRATCH2_64, &IRRunBreakpoint);

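		// CBZ/CBNZ can only reach ±1MB, so if the dispatcher is too far away,
		// invert the test and branch through an unconditional B() instead.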
		ptrdiff_t distance = dispatcherCheckCoreState_ - GetCodePointer();
		if (distance >= -0x100000 && distance < 0x100000) {
			CBNZ(W0, dispatcherCheckCoreState_);
		} else {
			FixupBranch keepOnKeepingOn = CBZ(W0);
			B(dispatcherCheckCoreState_);
			SetJumpTarget(keepOnKeepingOn);
		}
		break;
	}

	case IROp::MemoryCheck:
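		// If the address is a known constant, the memcheck can be resolved at compile
		// time and a call emitted only when one actually overlaps; otherwise we emit
		// runtime range compares against each active memcheck.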
		if (regs_.IsGPRImm(inst.src1)) {
			uint32_t iaddr = regs_.GetGPRImm(inst.src1) + inst.constant;
			uint32_t checkedPC = lastConstPC_ + inst.dest;
			int size = MIPSAnalyst::OpMemoryAccessSize(checkedPC);
			if (size == 0) {
				checkedPC += 4;
				size = MIPSAnalyst::OpMemoryAccessSize(checkedPC);
			}
			bool isWrite = MIPSAnalyst::IsOpMemoryWrite(checkedPC);

			MemCheck check;
			if (CBreakPoints::GetMemCheckInRange(iaddr, size, &check)) {
				if (!(check.cond & MEMCHECK_READ) && !isWrite)
					break;
				if (!(check.cond & (MEMCHECK_WRITE | MEMCHECK_WRITE_ONCHANGE)) && isWrite)
					break;

				// We need to flush, or conditions and log expressions will see old register values.
				FlushAll();

				MOVI2R(W0, checkedPC);
				MOVI2R(W1, iaddr);
				QuickCallFunction(SCRATCH2_64, &IRRunMemCheck);

				ptrdiff_t distance = dispatcherCheckCoreState_ - GetCodePointer();
				if (distance >= -0x100000 && distance < 0x100000) {
					CBNZ(W0, dispatcherCheckCoreState_);
				} else {
					FixupBranch keepOnKeepingOn = CBZ(W0);
					B(dispatcherCheckCoreState_);
					SetJumpTarget(keepOnKeepingOn);
				}
			}
		} else {
			uint32_t checkedPC = lastConstPC_ + inst.dest;
			int size = MIPSAnalyst::OpMemoryAccessSize(checkedPC);
			if (size == 0) {
				checkedPC += 4;
				size = MIPSAnalyst::OpMemoryAccessSize(checkedPC);
			}
			bool isWrite = MIPSAnalyst::IsOpMemoryWrite(checkedPC);

			const auto memchecks = CBreakPoints::GetMemCheckRanges(isWrite);
			// We can trivially skip if there are no checks for this type (i.e. read vs write.)
			if (memchecks.empty())
				break;

			ARM64Reg addrBase = regs_.MapGPR(inst.src1);
			ADDI2R(SCRATCH1, addrBase, inst.constant, SCRATCH2);

			// We need to flush, or conditions and log expressions will see old register values.
			FlushAll();

			std::vector<FixupBranch> hitChecks;
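			// A ranged check tests start - size < addr < end with a compare plus a
			// conditional compare (CCMP); a single-address check just compares for equality.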
			for (auto it : memchecks) {
				if (it.end != 0) {
					CMPI2R(SCRATCH1, it.start - size, SCRATCH2);
					MOVI2R(SCRATCH2, it.end);
					CCMP(SCRATCH1, SCRATCH2, 0xF, CC_HI);
					hitChecks.push_back(B(CC_LO));
				} else {
					CMPI2R(SCRATCH1, it.start, SCRATCH2);
					hitChecks.push_back(B(CC_EQ));
				}
			}

			FixupBranch noHits = B();

			// Okay, now land any hit here.
			for (auto &fixup : hitChecks)
				SetJumpTarget(fixup);
			hitChecks.clear();

			MOVI2R(W0, checkedPC);
			MOV(W1, SCRATCH1);
			QuickCallFunction(SCRATCH2_64, &IRRunMemCheck);

			ptrdiff_t distance = dispatcherCheckCoreState_ - GetCodePointer();
			if (distance >= -0x100000 && distance < 0x100000) {
				CBNZ(W0, dispatcherCheckCoreState_);
			} else {
				FixupBranch keepOnKeepingOn = CBZ(W0);
				B(dispatcherCheckCoreState_);
				SetJumpTarget(keepOnKeepingOn);
			}

			SetJumpTarget(noHits);
		}
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_System(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::Syscall:
		FlushAll();
		SaveStaticRegisters();

		WriteDebugProfilerStatus(IRProfilerStatus::SYSCALL);
#ifdef USE_PROFILER
		// When profiling, we can't skip CallSyscall, since it times syscalls.
		MOVI2R(W0, inst.constant);
		QuickCallFunction(SCRATCH2_64, &CallSyscall);
#else
		// Skip the CallSyscall where possible.
		{
			MIPSOpcode op(inst.constant);
			void *quickFunc = GetQuickSyscallFunc(op);
			if (quickFunc) {
				MOVP2R(X0, GetSyscallFuncPointer(op));
				QuickCallFunction(SCRATCH2_64, (const u8 *)quickFunc);
			} else {
				MOVI2R(W0, inst.constant);
				QuickCallFunction(SCRATCH2_64, &CallSyscall);
			}
		}
#endif

		WriteDebugProfilerStatus(IRProfilerStatus::IN_JIT);
		LoadStaticRegisters();
		// This is always followed by an ExitToPC, where we check coreState.
		break;

	case IROp::CallReplacement:
		FlushAll();
		SaveStaticRegisters();
		WriteDebugProfilerStatus(IRProfilerStatus::REPLACEMENT);
		QuickCallFunction(SCRATCH2_64, GetReplacementFunc(inst.constant)->replaceFunc);
		WriteDebugProfilerStatus(IRProfilerStatus::IN_JIT);
		LoadStaticRegisters();

		// Take the absolute value of the returned cycle count and subtract it from the downcount.
		CMP(W0, 0);
		CSNEG(SCRATCH1, W0, W0, CC_PL);
		SUB(DOWNCOUNTREG, DOWNCOUNTREG, SCRATCH1);

		// W0 might be the mapped reg, but there's only one.
		// Set dest reg to the sign of the result.
		regs_.Map(inst);
		ASR(regs_.R(inst.dest), W0, 31);
		break;

	case IROp::Break:
		FlushAll();
		// This doesn't naturally have restore/apply around it.
		RestoreRoundingMode(true);
		SaveStaticRegisters();
		MovFromPC(W0);
		QuickCallFunction(SCRATCH2_64, &Core_Break);
		LoadStaticRegisters();
		ApplyRoundingMode(true);
		MovFromPC(SCRATCH1);
		ADDI2R(SCRATCH1, SCRATCH1, 4, SCRATCH2);
		B(dispatcherPCInSCRATCH1_);
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_Transfer(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::SetCtrlVFPU:
		regs_.SetGPRImm(IRREG_VFPU_CTRL_BASE + inst.dest, inst.constant);
		break;

	case IROp::SetCtrlVFPUReg:
		regs_.Map(inst);
		MOV(regs_.R(IRREG_VFPU_CTRL_BASE + inst.dest), regs_.R(inst.src1));
		break;

	case IROp::SetCtrlVFPUFReg:
		regs_.Map(inst);
		fp_.FMOV(regs_.R(IRREG_VFPU_CTRL_BASE + inst.dest), regs_.F(inst.src1));
		break;

	case IROp::FpCondFromReg:
		regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
		MOV(regs_.R(IRREG_FPCOND), regs_.R(inst.src1));
		break;

	case IROp::FpCondToReg:
		regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::INIT } });
		MOV(regs_.R(inst.dest), regs_.R(IRREG_FPCOND));
		break;

	case IROp::FpCtrlFromReg:
		regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
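		// Mask the value to the supported fcr31 bits; bit 23 is the condition bit (fpcond), extracted below.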
		ANDI2R(SCRATCH1, regs_.R(inst.src1), 0x0181FFFF, SCRATCH2);
		// Extract the new fpcond value.
		UBFX(regs_.R(IRREG_FPCOND), SCRATCH1, 23, 1);
		STR(INDEX_UNSIGNED, SCRATCH1, CTXREG, IRREG_FCR31 * 4);
		break;

	case IROp::FpCtrlToReg:
		regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::INIT } });
		// Load fcr31 and replace the fpcond bit with its current value.
		LDR(INDEX_UNSIGNED, regs_.R(inst.dest), CTXREG, IRREG_FCR31 * 4);
		BFI(regs_.R(inst.dest), regs_.R(IRREG_FPCOND), 23, 1);
		// Also update mips->fcr31 while we're here.
		STR(INDEX_UNSIGNED, regs_.R(inst.dest), CTXREG, IRREG_FCR31 * 4);
		break;

	case IROp::VfpuCtrlToReg:
		regs_.Map(inst);
		MOV(regs_.R(inst.dest), regs_.R(IRREG_VFPU_CTRL_BASE + inst.src1));
		break;

	case IROp::FMovFromGPR:
		if (regs_.IsGPRImm(inst.src1) && regs_.GetGPRImm(inst.src1) == 0) {
			regs_.MapFPR(inst.dest, MIPSMap::NOINIT);
			fp_.MOVI2F(regs_.F(inst.dest), 0.0f, SCRATCH1);
		} else {
			regs_.Map(inst);
			fp_.FMOV(regs_.F(inst.dest), regs_.R(inst.src1));
		}
		break;

	case IROp::FMovToGPR:
		regs_.Map(inst);
		fp_.FMOV(regs_.R(inst.dest), regs_.F(inst.src1));
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_ValidateAddress(IRInst inst) {
	CONDITIONAL_DISABLE;

	bool isWrite = inst.src2 & 1;
	int alignment = 0;
	switch (inst.op) {
	case IROp::ValidateAddress8:
		alignment = 1;
		break;

	case IROp::ValidateAddress16:
		alignment = 2;
		break;

	case IROp::ValidateAddress32:
		alignment = 4;
		break;

	case IROp::ValidateAddress128:
		alignment = 16;
		break;

	default:
		INVALIDOP;
		break;
	}

	if (regs_.IsGPRMappedAsPointer(inst.src1)) {
		if (!jo.enablePointerify) {
			SUB(SCRATCH1_64, regs_.RPtr(inst.src1), MEMBASEREG);
			ADDI2R(SCRATCH1, SCRATCH1, inst.constant, SCRATCH2);
		} else {
			ADDI2R(SCRATCH1, regs_.R(inst.src1), inst.constant, SCRATCH2);
		}
	} else {
		regs_.Map(inst);
		ADDI2R(SCRATCH1, regs_.R(inst.src1), inst.constant, SCRATCH2);
	}
	ANDI2R(SCRATCH1, SCRATCH1, 0x3FFFFFFF, SCRATCH2);

	std::vector<FixupBranch> validJumps;

	FixupBranch unaligned;
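	// For 16-bit accesses, a single TBNZ on bit 0 suffices; wider accesses test all low bits at once.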
	if (alignment == 2) {
		unaligned = TBNZ(SCRATCH1, 0);
	} else if (alignment != 1) {
		TSTI2R(SCRATCH1, alignment - 1, SCRATCH2);
		unaligned = B(CC_NEQ);
	}

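	// Accept the address if it falls within user/kernel RAM, VRAM, or the scratchpad;
	// anything else (or an unaligned access) falls through to report a bad address.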
	CMPI2R(SCRATCH1, PSP_GetUserMemoryEnd() - alignment, SCRATCH2);
	FixupBranch tooHighRAM = B(CC_HI);
	CMPI2R(SCRATCH1, PSP_GetKernelMemoryBase(), SCRATCH2);
	validJumps.push_back(B(CC_HS));

	CMPI2R(SCRATCH1, PSP_GetVidMemEnd() - alignment, SCRATCH2);
	FixupBranch tooHighVid = B(CC_HI);
	CMPI2R(SCRATCH1, PSP_GetVidMemBase(), SCRATCH2);
	validJumps.push_back(B(CC_HS));

	CMPI2R(SCRATCH1, PSP_GetScratchpadMemoryEnd() - alignment, SCRATCH2);
	FixupBranch tooHighScratch = B(CC_HI);
	CMPI2R(SCRATCH1, PSP_GetScratchpadMemoryBase(), SCRATCH2);
	validJumps.push_back(B(CC_HS));

	if (alignment != 1)
		SetJumpTarget(unaligned);
	SetJumpTarget(tooHighRAM);
	SetJumpTarget(tooHighVid);
	SetJumpTarget(tooHighScratch);

	// If we got here, something unusual and bad happened, so we'll always go back to the dispatcher.
	// Because of that, we can avoid flushing outside this case.
	auto regsCopy = regs_;
	regsCopy.FlushAll();

	// Ignores the return value, always returns to the dispatcher.
	// Otherwise would need a thunk to restore regs.
	MOV(W0, SCRATCH1);
	MOVI2R(W1, alignment);
	MOVI2R(W2, isWrite ? 1 : 0);
	QuickCallFunction(SCRATCH2_64, &ReportBadAddress);
	B(dispatcherCheckCoreState_);

	for (FixupBranch &b : validJumps)
		SetJumpTarget(b);
}

} // namespace MIPSComp

#endif