GitHub Repository: hrydgard/ppsspp
Path: Core/MIPS/x86/JitSafeMem.cpp
// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"

#if PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)

#include "Core/Config.h"
#include "Core/Debugger/Breakpoints.h"
#include "Core/MemMap.h"
#include "Core/MIPS/JitCommon/JitCommon.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/x86/Jit.h"
#include "Core/MIPS/x86/JitSafeMem.h"
#include "Core/System.h"

namespace MIPSComp
{
using namespace Gen;
using namespace X64JitConstants;

JitSafeMem::JitSafeMem(Jit *jit, MIPSGPReg raddr, s32 offset, u32 alignMask)
	: jit_(jit), raddr_(raddr), offset_(offset), needsCheck_(false), needsSkip_(false), alignMask_(alignMask)
{
	// Mask out the kernel RAM bit, because we'll end up with a negative offset to MEMBASEREG.
	if (jit_->gpr.IsImm(raddr_))
		iaddr_ = (jit_->gpr.GetImm(raddr_) + offset_) & 0x7FFFFFFF;
	else
		iaddr_ = (u32) -1;

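	// Fast mode skips the range checks in PrepareMemoryOpArg() entirely, either because the
	// global fast-memory option is on or because the base register is SP, which is presumably
	// assumed to always point at valid RAM.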
	fast_ = g_Config.bFastMemory || raddr == MIPS_REG_SP;

	// If raddr_ is going to get loaded soon, load it now for more optimal code.
	// We assume that it was already locked.
	const int LOOKAHEAD_OPS = 3;
	if (!jit_->gpr.R(raddr_).IsImm() && MIPSAnalyst::IsRegisterUsed(raddr_, jit_->GetCompilerPC() + 4, LOOKAHEAD_OPS))
		jit_->gpr.MapReg(raddr_, true, false);
}

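// PrepareWrite()/PrepareRead() set up the operand for a store/load. For a known immediate
// address they return false if it is invalid; otherwise the returned operand may rely on the
// range checks emitted by PrepareMemoryOpArg(), with the slow path completed later via
// PrepareSlowWrite()/DoSlowWrite() or PrepareSlowRead().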
bool JitSafeMem::PrepareWrite(OpArg &dest, int size)
{
	size_ = size;
	// If it's an immediate, we can do the write if valid.
	if (iaddr_ != (u32) -1)
	{
		if (ImmValid())
		{
			u32 addr = (iaddr_ & alignMask_);
#ifdef MASKED_PSP_MEMORY
			addr &= Memory::MEMVIEW32_MASK;
#endif

#if PPSSPP_ARCH(32BIT)
			dest = M(Memory::base + addr);  // 32-bit only
#else
			dest = MDisp(MEMBASEREG, addr);
#endif
			return true;
		}
		else
			return false;
	}
	// Otherwise, we can always do the write (conditionally).
	else
		dest = PrepareMemoryOpArg(MEM_WRITE);
	return true;
}

bool JitSafeMem::PrepareRead(OpArg &src, int size)
{
	size_ = size;
	if (iaddr_ != (u32) -1)
	{
		if (ImmValid())
		{
			u32 addr = (iaddr_ & alignMask_);
#ifdef MASKED_PSP_MEMORY
			addr &= Memory::MEMVIEW32_MASK;
#endif

#if PPSSPP_ARCH(32BIT)
			src = M(Memory::base + addr);  // 32-bit only
#else
			src = MDisp(MEMBASEREG, addr);
#endif
			return true;
		}
		else
			return false;
	}
	else
		src = PrepareMemoryOpArg(MEM_READ);
	return true;
}

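// Returns an operand for the same access at address + suboffset on the fast path, reusing the
// address already computed by PrepareRead()/PrepareWrite(). The assert below requires suboffset
// to be unaffected by alignMask_.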
OpArg JitSafeMem::NextFastAddress(int suboffset)
{
	if (iaddr_ != (u32) -1)
	{
		u32 addr = (iaddr_ + suboffset) & alignMask_;
#ifdef MASKED_PSP_MEMORY
		addr &= Memory::MEMVIEW32_MASK;
#endif

#if PPSSPP_ARCH(32BIT)
		return M(Memory::base + addr);  // 32-bit only
#else
		return MDisp(MEMBASEREG, addr);
#endif
	}

	_dbg_assert_msg_((suboffset & alignMask_) == suboffset, "suboffset must be aligned");

#if PPSSPP_ARCH(32BIT)
	return MDisp(xaddr_, (u32) Memory::base + offset_ + suboffset);
#else
	return MComplex(MEMBASEREG, xaddr_, SCALE_1, offset_ + suboffset);
#endif
}

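// Builds the memory operand for a non-immediate address: picks (or copies into EAX) the register
// holding the guest address, emits the RAM range checks when not in fast mode, and applies the
// alignment mask. xaddr_ ends up holding the guest address, not including offset_ (see the TODO below).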
OpArg JitSafeMem::PrepareMemoryOpArg(MemoryOpType type)
{
	// We may not even need to move into EAX as a temporary.
	bool needTemp = alignMask_ != 0xFFFFFFFF;

#ifdef MASKED_PSP_MEMORY
	bool needMask = true; //raddr_ != MIPS_REG_SP;  // Commented out this speedhack due to low impact
	// We always mask on 32 bit in fast memory mode.
	needTemp = needTemp || (fast_ && needMask);
#endif

	if (jit_->gpr.R(raddr_).IsSimpleReg() && !needTemp)
	{
		jit_->gpr.MapReg(raddr_, true, false);
		xaddr_ = jit_->gpr.RX(raddr_);
	}
	else
	{
		jit_->MOV(32, R(EAX), jit_->gpr.R(raddr_));
		xaddr_ = EAX;
	}

	if (!fast_)
	{
		// Is it in physical ram?
		jit_->CMP(32, R(xaddr_), Imm32(PSP_GetKernelMemoryBase() - offset_));
		tooLow_ = jit_->J_CC(CC_B);
		jit_->CMP(32, R(xaddr_), Imm32(PSP_GetUserMemoryEnd() - offset_ - (size_ - 1)));
		tooHigh_ = jit_->J_CC(CC_AE);

		// We may need to jump back up here.
		safe_ = jit_->GetCodePtr();
	}
	else
	{
#ifdef MASKED_PSP_MEMORY
		if (needMask) {
			jit_->AND(32, R(EAX), Imm32(Memory::MEMVIEW32_MASK));
		}
#endif
	}

	// TODO: This could be more optimal, but the common case is that we want xaddr_ not to include offset_.
	// Since we need to align them after add, we add and subtract.
	if (alignMask_ != 0xFFFFFFFF)
	{
		jit_->ADD(32, R(xaddr_), Imm32(offset_));
		jit_->AND(32, R(xaddr_), Imm32(alignMask_));
		jit_->SUB(32, R(xaddr_), Imm32(offset_));
	}

#if PPSSPP_ARCH(32BIT)
	return MDisp(xaddr_, (u32) Memory::base + offset_);
#else
	return MComplex(MEMBASEREG, xaddr_, SCALE_1, offset_);
#endif
}

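// Emits the jump that lets the fast path skip over the slow path, then binds the out-of-range
// branches from PrepareMemoryOpArg(). Addresses that turn out to be in the scratchpad jump back
// to the fast path (safe_); anything else falls through into the slow handler the caller emits next.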
void JitSafeMem::PrepareSlowAccess()
{
	// Skip the fast path (which the caller wrote just now.)
	skip_ = jit_->J(true);
	needsSkip_ = true;
	jit_->SetJumpTarget(tooLow_);
	jit_->SetJumpTarget(tooHigh_);

	// Might also be the scratchpad.
	jit_->CMP(32, R(xaddr_), Imm32(PSP_GetScratchpadMemoryBase() - offset_));
	FixupBranch tooLow = jit_->J_CC(CC_B);
	jit_->CMP(32, R(xaddr_), Imm32(PSP_GetScratchpadMemoryEnd() - offset_ - (size_ - 1)));
	jit_->J_CC(CC_B, safe_);
	jit_->SetJumpTarget(tooLow);
}

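// Returns true when the caller still has to emit a slow-path store (via DoSlowWrite()).
// In fast mode, or for a valid immediate address, no slow path is needed.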
bool JitSafeMem::PrepareSlowWrite()
{
	// If it's immediate, we only need a slow write on invalid.
	if (iaddr_ != (u32) -1)
		return !fast_ && !ImmValid();

	if (!fast_)
	{
		PrepareSlowAccess();
		return true;
	}
	else
		return false;
}

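// Emits a call to the safe write helper: address in EAX, value in EDX (see the mini ABI comment
// near JitSafeMemFuncs below).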
void JitSafeMem::DoSlowWrite(const void *safeFunc, const OpArg &src, int suboffset) {
	_dbg_assert_msg_(safeFunc != nullptr, "Safe func cannot be null");

	if (iaddr_ != (u32) -1)
		jit_->MOV(32, R(EAX), Imm32((iaddr_ + suboffset) & alignMask_));
	else
	{
		jit_->LEA(32, EAX, MDisp(xaddr_, offset_ + suboffset));
		if (alignMask_ != 0xFFFFFFFF)
			jit_->AND(32, R(EAX), Imm32(alignMask_));
	}

#if PPSSPP_ARCH(32BIT)
	jit_->PUSH(EDX);
#endif
	if (!src.IsSimpleReg(EDX)) {
		jit_->MOV(32, R(EDX), src);
	}
	if (!g_Config.bIgnoreBadMemAccess) {
		jit_->MOV(32, MIPSSTATE_VAR(pc), Imm32(jit_->GetCompilerPC()));
	}
	// This is a special jit-ABI'd function.
	if (jit_->CanCALLDirect(safeFunc)) {
		jit_->CALL(safeFunc);
	} else {
		// We can't safely flush a reg, but this shouldn't be normal.
		IndirectCALL(safeFunc);
	}
#if PPSSPP_ARCH(32BIT)
	jit_->POP(EDX);
#endif
	needsCheck_ = true;
}

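// Counterpart of PrepareSlowWrite() for loads: emits the slow-path call itself (result in EAX/RAX
// per the mini ABI below) and returns true if a slow path was actually generated.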
bool JitSafeMem::PrepareSlowRead(const void *safeFunc) {
	_dbg_assert_msg_(safeFunc != nullptr, "Safe func cannot be null");
	if (!fast_) {
		if (iaddr_ != (u32) -1) {
			// No slow read necessary.
			if (ImmValid())
				return false;
			jit_->MOV(32, R(EAX), Imm32(iaddr_ & alignMask_));
		} else {
			PrepareSlowAccess();
			jit_->LEA(32, EAX, MDisp(xaddr_, offset_));
			if (alignMask_ != 0xFFFFFFFF)
				jit_->AND(32, R(EAX), Imm32(alignMask_));
		}

		if (!g_Config.bIgnoreBadMemAccess) {
			jit_->MOV(32, MIPSSTATE_VAR(pc), Imm32(jit_->GetCompilerPC()));
		}
		// This is a special jit-ABI'd function.
		if (jit_->CanCALLDirect(safeFunc)) {
			jit_->CALL(safeFunc);
		} else {
			// We can't safely flush a reg, but this shouldn't be normal.
			IndirectCALL(safeFunc);
		}
		needsCheck_ = true;
		return true;
	}
	else
		return false;
}

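// Emits another safe read at address + suboffset, for accesses wider than a single read.
// A suboffset of 0 is a no-op because PrepareSlowRead() already performed that read.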
void JitSafeMem::NextSlowRead(const void *safeFunc, int suboffset) {
	_dbg_assert_msg_(safeFunc != nullptr, "Safe func cannot be null");
	_dbg_assert_msg_(!fast_, "NextSlowRead() called in fast memory mode?");

	// For simplicity, do nothing for 0. We already read in PrepareSlowRead().
	if (suboffset == 0)
		return;

	if (jit_->gpr.IsImm(raddr_))
	{
		_dbg_assert_msg_(!Memory::IsValidAddress(iaddr_ + suboffset), "NextSlowRead() for an invalid immediate address?");

		jit_->MOV(32, R(EAX), Imm32((iaddr_ + suboffset) & alignMask_));
	}
	// For GPR, if xaddr_ was the dest register, this will be wrong. Don't use in GPR.
	else
	{
		jit_->LEA(32, EAX, MDisp(xaddr_, offset_ + suboffset));
		if (alignMask_ != 0xFFFFFFFF)
			jit_->AND(32, R(EAX), Imm32(alignMask_));
	}

	if (!g_Config.bIgnoreBadMemAccess) {
		jit_->MOV(32, MIPSSTATE_VAR(pc), Imm32(jit_->GetCompilerPC()));
	}
	// This is a special jit-ABI'd function.
	if (jit_->CanCALLDirect(safeFunc)) {
		jit_->CALL(safeFunc);
	} else {
		// We can't safely flush a reg, but this shouldn't be normal.
		IndirectCALL(safeFunc);
	}
}

bool JitSafeMem::ImmValid()
{
	return iaddr_ != (u32) -1 && Memory::IsValidAddress(iaddr_) && Memory::IsValidAddress(iaddr_ + size_ - 1);
}

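// Used when CanCALLDirect() rejects a direct CALL to safeFunc: loads the pointer into ECX/RCX and
// calls through it, preserving that register and keeping the stack as aligned as the direct-CALL
// path would have left it.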
void JitSafeMem::IndirectCALL(const void *safeFunc) {
#if PPSSPP_ARCH(32BIT)
	jit_->PUSH(ECX);
	jit_->SUB(PTRBITS, R(ESP), Imm8(16 - 4));
	jit_->MOV(PTRBITS, R(ECX), ImmPtr(safeFunc));
	jit_->CALLptr(R(ECX));
	jit_->ADD(PTRBITS, R(ESP), Imm8(16 - 4));
	jit_->POP(ECX);
#else
	jit_->PUSH(RCX);
	jit_->SUB(PTRBITS, R(RSP), Imm8(8));
	jit_->MOV(PTRBITS, R(RCX), ImmPtr(safeFunc));
	jit_->CALLptr(R(RCX));
	jit_->ADD(64, R(RSP), Imm8(8));
	jit_->POP(RCX);
#endif
}

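// Closes out the access: binds the fast path's skip-over-slow-path jump and any pending skip
// checks, and schedules a coreState check if a slow call might have flagged a bad access.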
void JitSafeMem::Finish()
{
	// Memory::Read_U32/etc. may have tripped coreState.
	if (needsCheck_ && !g_Config.bIgnoreBadMemAccess)
		jit_->js.afterOp |= JitState::AFTER_CORE_STATE;
	if (needsSkip_)
		jit_->SetJumpTarget(skip_);
	for (auto it = skipChecks_.begin(), end = skipChecks_.end(); it != end; ++it)
		jit_->SetJumpTarget(*it);
}

static const int FUNCS_ARENA_SIZE = 512 * 1024;

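// Generates the read/write helper thunks into their own code arena. Each helper checks EAX
// against the valid PSP memory ranges and does the access directly through MEMBASEREG (or
// Memory::base on 32-bit) when it can, falling back to the C++ Memory::Read_*/Write_* functions
// otherwise.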
void JitSafeMemFuncs::Init(ThunkManager *thunks) {
	using namespace Gen;

	AllocCodeSpace(FUNCS_ARENA_SIZE);
	thunks_ = thunks;

	BeginWrite(1024);
	readU32 = GetCodePtr();
	CreateReadFunc(32, (const void *)&Memory::Read_U32);
	readU16 = GetCodePtr();
	CreateReadFunc(16, (const void *)&Memory::Read_U16);
	readU8 = GetCodePtr();
	CreateReadFunc(8, (const void *)&Memory::Read_U8);

	writeU32 = GetCodePtr();
	CreateWriteFunc(32, (const void *)&Memory::Write_U32);
	writeU16 = GetCodePtr();
	CreateWriteFunc(16, (const void *)&Memory::Write_U16);
	writeU8 = GetCodePtr();
	CreateWriteFunc(8, (const void *)&Memory::Write_U8);
	EndWrite();
}

void JitSafeMemFuncs::Shutdown() {
	ResetCodePtr(0);
	FreeCodeSpace();

	readU32 = nullptr;
	readU16 = nullptr;
	readU8 = nullptr;
	writeU32 = nullptr;
	writeU16 = nullptr;
	writeU8 = nullptr;
}

// Mini ABI:
// Read funcs take address in EAX, return in RAX.
// Write funcs take address in EAX, data in RDX.
// On x86-32, Write funcs also have an extra 4 bytes on the stack.

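// Note on the stack adjustments below: the helpers are entered via CALL, so the return address
// (plus the saved register for writes on x86-32) leaves the stack off 16-byte alignment; the
// SUB/ADD pairs restore alignment before calling into C++, and on x64 the 0x28 presumably also
// leaves room for the Win64 shadow space.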
void JitSafeMemFuncs::CreateReadFunc(int bits, const void *fallbackFunc) {
	CheckDirectEAX();

	// Since we were CALLed, we need to align the stack before calling C++.
#if PPSSPP_ARCH(32BIT)
	SUB(32, R(ESP), Imm8(16 - 4));
	ABI_CallFunctionA(thunks_->ProtectFunction(fallbackFunc, 1), R(EAX));
	ADD(32, R(ESP), Imm8(16 - 4));
#else
	SUB(64, R(RSP), Imm8(0x28));
	ABI_CallFunctionA(thunks_->ProtectFunction(fallbackFunc, 1), R(EAX));
	ADD(64, R(RSP), Imm8(0x28));
#endif

	RET();

	StartDirectAccess();

#if PPSSPP_ARCH(32BIT)
	MOVZX(32, bits, EAX, MDisp(EAX, (u32)Memory::base));
#else
	MOVZX(32, bits, EAX, MRegSum(MEMBASEREG, EAX));
#endif

	RET();
}

void JitSafeMemFuncs::CreateWriteFunc(int bits, const void *fallbackFunc) {
	CheckDirectEAX();

	// Since we were CALLed, we need to align the stack before calling C++.
#if PPSSPP_ARCH(32BIT)
	// 4 for return, 4 for saved reg on stack.
	SUB(32, R(ESP), Imm8(16 - 4 - 4));
	ABI_CallFunctionAA(thunks_->ProtectFunction(fallbackFunc, 2), R(EDX), R(EAX));
	ADD(32, R(ESP), Imm8(16 - 4 - 4));
#else
	SUB(64, R(RSP), Imm8(0x28));
	ABI_CallFunctionAA(thunks_->ProtectFunction(fallbackFunc, 2), R(EDX), R(EAX));
	ADD(64, R(RSP), Imm8(0x28));
#endif

	RET();

	StartDirectAccess();

#if PPSSPP_ARCH(32BIT)
	MOV(bits, MDisp(EAX, (u32)Memory::base), R(EDX));
#else
	MOV(bits, MRegSum(MEMBASEREG, EAX), R(EDX));
#endif

	RET();
}

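// Emits the address classification for the helpers: if EAX (with cache/kernel bits cleared)
// falls inside RAM, VRAM, or the scratchpad, jump ahead to the direct-access code (targets bound
// by StartDirectAccess()); otherwise fall through to the C++ fallback emitted right after this.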
void JitSafeMemFuncs::CheckDirectEAX() {
	// Clear any cache/kernel bits.
	AND(32, R(EAX), Imm32(0x3FFFFFFF));

	CMP(32, R(EAX), Imm32(PSP_GetUserMemoryEnd()));
	FixupBranch tooHighRAM = J_CC(CC_AE);
	CMP(32, R(EAX), Imm32(PSP_GetKernelMemoryBase()));
	skips_.push_back(J_CC(CC_AE));

	CMP(32, R(EAX), Imm32(PSP_GetVidMemEnd()));
	FixupBranch tooHighVid = J_CC(CC_AE);
	CMP(32, R(EAX), Imm32(PSP_GetVidMemBase()));
	skips_.push_back(J_CC(CC_AE));

	CMP(32, R(EAX), Imm32(PSP_GetScratchpadMemoryEnd()));
	FixupBranch tooHighScratch = J_CC(CC_AE);
	CMP(32, R(EAX), Imm32(PSP_GetScratchpadMemoryBase()));
	skips_.push_back(J_CC(CC_AE));

	SetJumpTarget(tooHighRAM);
	SetJumpTarget(tooHighVid);
	SetJumpTarget(tooHighScratch);
}

void JitSafeMemFuncs::StartDirectAccess() {
	for (auto it = skips_.begin(), end = skips_.end(); it != end; ++it) {
		SetJumpTarget(*it);
	}
	skips_.clear();
}

};

#endif // PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)