CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Sign Up · Sign In
hrydgard

CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!

GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/x86/Asm.cpp
Views: 1401
1
// Copyright (C) 2003 Dolphin Project.
2
3
// This program is free software: you can redistribute it and/or modify
4
// it under the terms of the GNU General Public License as published by
5
// the Free Software Foundation, version 2.0 or later versions.
6
7
// This program is distributed in the hope that it will be useful,
8
// but WITHOUT ANY WARRANTY; without even the implied warranty of
9
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
// GNU General Public License 2.0 for more details.
11
12
// A copy of the GPL 2.0 should have been included with the program.
13
// If not, see http://www.gnu.org/licenses/
14
15
// Official SVN repository and contact information can be found at
16
// http://code.google.com/p/dolphin-emu/
17
18
#include "ppsspp_config.h"
19
#if PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)
20
21
#include "Common/Math/math_util.h"
22
23
#include "ABI.h"
24
#include "x64Emitter.h"
25
26
#include "Core/Core.h"
27
#include "Core/MemMap.h"
28
#include "Core/System.h"
29
#include "Core/MIPS/MIPS.h"
30
#include "Core/CoreTiming.h"
31
#include "Common/MemoryUtil.h"
32
33
#include "Core/MIPS/JitCommon/JitCommon.h"
34
#include "Core/MIPS/x86/Jit.h"
35
36
using namespace Gen;
37
using namespace X64JitConstants;
38
39
extern volatile CoreState coreState;
40
41
namespace MIPSComp
42
{
43
44
// TODO: make this a runtime/config option rather than a compile-time constant.
//#if _DEBUG
// When true, the dispatcher emitted by GenerateFixedCode increments
// mips->debugCount for every block entered. Note this is read at
// code-generation time, so flipping it requires regenerating the JIT code.
static bool enableDebug = false;
//#else
//	bool enableDebug = false;
//#endif

//static bool enableStatistics = false; //unused?

// Register conventions the emitted dispatcher code relies on:

//GLOBAL STATIC ALLOCATIONS x86
//EAX - ubiquitous scratch register - EVERYBODY scratches this
//EBP - Pointer to fpr/gpr regs

//GLOBAL STATIC ALLOCATIONS x64
//EAX - ubiquitous scratch register - EVERYBODY scratches this
//RBX - Base pointer of memory
//R14 - Pointer to fpr/gpr regs
//R15 - Pointer to array of block pointers
64
// Debug trace helper: logs the current guest PC. Intended to be called
// from (or around) generated code when tracing JIT execution.
void ImHere() {
	DEBUG_LOG(Log::CPU, "JIT Here: %08x", currentMIPS->pc);
}
68
// Emits the fixed (pre-generated) assembly stubs used by the x86 JIT:
//  - restoreRoundingMode / applyRoundingMode: sync the host MXCSR with the
//    rounding-mode / flush-to-zero bits of the guest's fcr31.
//  - enterDispatcher + dispatcher* labels: the main block-dispatch loop that
//    fetches the word at mips->pc and either jumps into an already-compiled
//    block (emuhack-tagged word) or calls MIPSComp::JitAt to compile one.
//  - crashHandler: flags CORE_RUNTIME_ERROR and exits the dispatcher.
// The captured label pointers are stored in the corresponding members so the
// rest of the JIT can branch to these stubs.
void Jit::GenerateFixedCode(JitOptions &jo) {
	// Open a write window over the code region while we emit.
	BeginWrite(GetMemoryProtectPageSize());
	AlignCodePage();

	// Stub: reset host MXCSR rounding mode and flush-to-zero back to defaults.
	restoreRoundingMode = AlignCode16(); {
		STMXCSR(MIPSSTATE_VAR(temp));
		// Clear the rounding mode and flush-to-zero bits back to 0.
		AND(32, MIPSSTATE_VAR(temp), Imm32(~(7 << 13)));
		LDMXCSR(MIPSSTATE_VAR(temp));
		RET();
	}

	// Stub: translate the guest fcr31 rounding state into host MXCSR bits.
	applyRoundingMode = AlignCode16(); {
		MOV(32, R(EAX), MIPSSTATE_VAR(fcr31));
		// Keep only the bits we translate: the rounding mode (bits 0-1) and
		// bit 24 (tested separately below for flush-to-zero).
		AND(32, R(EAX), Imm32(0x01000003));

		// If it's 0 (nearest + no flush0), we don't actually bother setting - we cleared the rounding
		// mode out in restoreRoundingMode anyway. This is the most common.
		FixupBranch skip = J_CC(CC_Z);
		STMXCSR(MIPSSTATE_VAR(temp));

		// The MIPS bits don't correspond exactly, so we have to adjust.
		// 0 -> 0 (skip2), 1 -> 3, 2 -> 2 (skip2), 3 -> 1
		// i.e. odd values get bit 1 flipped.
		TEST(8, R(AL), Imm8(1));
		FixupBranch skip2 = J_CC(CC_Z);
		XOR(32, R(EAX), Imm8(2));
		SetJumpTarget(skip2);

		// Adjustment complete, now reconstruct MXCSR
		// (MXCSR rounding control lives in bits 13-14).
		SHL(32, R(EAX), Imm8(13));
		// Before setting new bits, we must clear the old ones.
		AND(32, MIPSSTATE_VAR(temp), Imm32(~(7 << 13))); // Clearing bits 13-14 (rounding mode) and 15 (flush to zero)
		OR(32, MIPSSTATE_VAR(temp), R(EAX));

		// fcr31 bit 24 set -> enable MXCSR bit 15 (flush to zero).
		TEST(32, MIPSSTATE_VAR(fcr31), Imm32(1 << 24));
		FixupBranch skip3 = J_CC(CC_Z);
		OR(32, MIPSSTATE_VAR(temp), Imm32(1 << 15));
		SetJumpTarget(skip3);

		LDMXCSR(MIPSSTATE_VAR(temp));
		SetJumpTarget(skip);
		RET();
	}

	// Entry point from C++: save callee-saved registers, set up the
	// statically allocated registers, then fall into the dispatcher loop.
	enterDispatcher = AlignCode16();
	ABI_PushAllCalleeSavedRegsAndAdjustStack();
#if PPSSPP_ARCH(AMD64)
	// Two statically allocated registers.
	MOV(64, R(MEMBASEREG), ImmPtr(Memory::base));
	uintptr_t jitbase = (uintptr_t)GetBasePtr();
	if (jitbase > 0x7FFFFFFFULL) {
		// The JIT base doesn't fit in a 32-bit immediate; keep it in a
		// register instead (see the block-entry ADD further down).
		MOV(64, R(JITBASEREG), ImmPtr(GetBasePtr()));
		jo.reserveR15ForAsm = true;
	}
#endif
	// From the start of the FP reg, a single byte offset can reach all GPR + all FPR (but no VFPUR)
	MOV(PTRBITS, R(CTXREG), ImmPtr(&mips_->f[0]));

	// Outer loop: run the scheduler (CoreTiming::Advance) with default
	// rounding, then dispatch blocks until coreState becomes non-zero.
	outerLoop = GetCodePtr();
		RestoreRoundingMode(true);
		ABI_CallFunction(reinterpret_cast<void *>(&CoreTiming::Advance));
		ApplyRoundingMode(true);
		FixupBranch skipToCoreStateCheck = J(); //skip the downcount check

		dispatcherCheckCoreState = GetCodePtr();

		// The result of slice decrementation should be in flags if somebody jumped here
		// IMPORTANT - We jump on negative, not carry!!!
		FixupBranch bailCoreState = J_CC(CC_S, true);

		SetJumpTarget(skipToCoreStateCheck);
		// coreState == 0 means keep running; anything else exits the loop.
		if (RipAccessible((const void *)&coreState)) {
			CMP(32, M(&coreState), Imm32(0)); // rip accessible
		} else {
			MOV(PTRBITS, R(RAX), ImmPtr((const void *)&coreState));
			CMP(32, MatR(RAX), Imm32(0));
		}
		FixupBranch badCoreState = J_CC(CC_NZ, true);
		FixupBranch skipToRealDispatch2 = J(); //skip the sync and compare first time

		dispatcher = GetCodePtr();

			// The result of slice decrementation should be in flags if somebody jumped here
			// IMPORTANT - We jump on negative, not carry!!!
			FixupBranch bail = J_CC(CC_S, true);

			SetJumpTarget(skipToRealDispatch2);

			dispatcherNoCheck = GetCodePtr();

			// Load the instruction word at the guest PC into EAX.
			MOV(32, R(EAX), MIPSSTATE_VAR(pc));
			dispatcherInEAXNoCheck = GetCodePtr();

#ifdef MASKED_PSP_MEMORY
			AND(32, R(EAX), Imm32(Memory::MEMVIEW32_MASK));
#endif
			dispatcherFetch = GetCodePtr();
#if PPSSPP_ARCH(X86)
			_assert_msg_( Memory::base != 0, "Memory base bogus");
			MOV(32, R(EAX), MDisp(EAX, (u32)Memory::base));
#elif PPSSPP_ARCH(AMD64)
			MOV(32, R(EAX), MComplex(MEMBASEREG, RAX, SCALE_1, 0));
#endif
			// If the word's top byte is the emuhack marker, its low bits are
			// an offset into the JIT code space; otherwise compile the block.
			MOV(32, R(EDX), R(EAX));
			_assert_msg_(MIPS_JITBLOCK_MASK == 0xFF000000, "Hardcoded assumption of emuhack mask");
			SHR(32, R(EDX), Imm8(24));
			CMP(32, R(EDX), Imm8(MIPS_EMUHACK_OPCODE >> 24));
			FixupBranch notfound = J_CC(CC_NE);
			if (enableDebug) {
				ADD(32, MIPSSTATE_VAR(debugCount), Imm8(1));
			}
			//grab from list and jump to it
			AND(32, R(EAX), Imm32(MIPS_EMUHACK_VALUE_MASK));
#if PPSSPP_ARCH(X86)
			ADD(32, R(EAX), ImmPtr(GetBasePtr()));
#elif PPSSPP_ARCH(AMD64)
			if (jo.reserveR15ForAsm) {
				ADD(64, R(RAX), R(JITBASEREG));
			} else {
				// See above, reserveR15ForAsm is used when above 0x7FFFFFFF.
				ADD(64, R(EAX), Imm32((u32)jitbase));
			}
#endif
			JMPptr(R(EAX));
			SetJumpTarget(notfound);

			//Ok, no block, let's jit
			RestoreRoundingMode(true);
			ABI_CallFunction(&MIPSComp::JitAt);
			ApplyRoundingMode(true);
			JMP(dispatcherNoCheck, true); // Let's just dispatch again, we'll enter the block since we know it's there.

		SetJumpTarget(bail);
		SetJumpTarget(bailCoreState);

		// Downcount expired: if coreState still says "running", go back to
		// the outer loop so CoreTiming can advance; otherwise fall through.
		if (RipAccessible((const void *)&coreState)) {
			CMP(32, M(&coreState), Imm32(0)); // rip accessible
		} else {
			MOV(PTRBITS, R(RAX), ImmPtr((const void *)&coreState));
			CMP(32, MatR(RAX), Imm32(0));
		}
		J_CC(CC_Z, outerLoop, true);

	// Common exit path: restore rounding, pop callee-saved regs, return to C++.
	const uint8_t *quitLoop = GetCodePtr();
	SetJumpTarget(badCoreState);
	RestoreRoundingMode(true);
	ABI_PopAllCalleeSavedRegsAndAdjustStack();
	RET();

	// Jumped to on a fatal fault in generated code: flag the runtime error
	// and leave through the common exit path above.
	crashHandler = GetCodePtr();
	if (RipAccessible((const void *)&coreState)) {
		MOV(32, M(&coreState), Imm32(CORE_RUNTIME_ERROR));
	} else {
		MOV(PTRBITS, R(RAX), ImmPtr((const void *)&coreState));
		MOV(32, MatR(RAX), Imm32(CORE_RUNTIME_ERROR));
	}
	JMP(quitLoop, true);

	// Let's spare the pre-generated code from unprotect-reprotect.
	endOfPregeneratedCode = AlignCodePage();
	EndWrite();
}
231
} // namespace
232
233
#endif // PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)
234
235