GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/x86/CompLoadStore.cpp

// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
#if PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)

#include "Core/MemMap.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/Config.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/x86/Jit.h"
#include "Core/MIPS/x86/RegCache.h"


#define _RS MIPS_GET_RS(op)
#define _RT MIPS_GET_RT(op)
#define _RD MIPS_GET_RD(op)
#define _FS MIPS_GET_FS(op)
#define _FT MIPS_GET_FT(op)
#define _FD MIPS_GET_FD(op)
#define _SA MIPS_GET_SA(op)
#define _POS ((op>> 6) & 0x1F)
#define _SIZE ((op>>11) & 0x1F)
#define _IMM16 (signed short)(op & 0xFFFF)
#define _IMM26 (op & 0x03FFFFFF)

// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non-working ones should have DISABLE.

// #define CONDITIONAL_DISABLE(flag) { Comp_Generic(op); return; }
#define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
#define DISABLE { Comp_Generic(op); return; }
#define INVALIDOP { Comp_Generic(op); return; }

namespace MIPSComp {
	using namespace Gen;

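	// Compiles a simple I-type load: rt = [rs + imm16]. 'bits' is the access width,
	// 'mov' is the extending move to use (MOVZX or MOVSX), and safeFunc is the
	// fallback called on the slow (non-direct-memory) path.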
	void Jit::CompITypeMemRead(MIPSOpcode op, u32 bits, void (XEmitter::*mov)(int, int, X64Reg, OpArg), const void *safeFunc)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;

		gpr.Lock(rt, rs);
		gpr.MapReg(rt, rt == rs, true);

		JitSafeMem safe(this, rs, offset);
		OpArg src;
		if (safe.PrepareRead(src, bits / 8))
			(this->*mov)(32, bits, gpr.RX(rt), src);
		if (safe.PrepareSlowRead(safeFunc))
			(this->*mov)(32, bits, gpr.RX(rt), R(EAX));
		safe.Finish();

		gpr.UnlockAll();
	}

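	// Shrinks an immediate OpArg to 'bits' so a narrow store doesn't carry a wider immediate.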
	static OpArg DowncastImm(OpArg in, int bits) {
		if (!in.IsImm())
			return in;
		if (in.GetImmBits() > bits) {
			in.SetImmBits(bits);
			return in;
		}
		return in;
	}

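	// Compiles a simple I-type store: [rs + imm16] = rt. makeRTWritable maps rt as
	// writable even when it's currently an immediate - sc uses this, since it writes
	// a result back into rt afterwards.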
	void Jit::CompITypeMemWrite(MIPSOpcode op, u32 bits, const void *safeFunc, bool makeRTWritable)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;

		gpr.Lock(rt, rs);

		if (rt == MIPS_REG_ZERO || gpr.R(rt).IsImm()) {
			if (makeRTWritable) {
				gpr.MapReg(rt, true, true);
			}
			// NOTICE_LOG(Log::JIT, "%d-bit Imm at %08x : %08x", bits, js.blockStart, (u32)gpr.R(rt).GetImmValue());
		} else {
			gpr.MapReg(rt, true, false);
		}

#if PPSSPP_ARCH(X86)
		// We use EDX so we can have DL for 8-bit ops.
		const bool needSwap = bits == 8 && !gpr.R(rt).IsSimpleReg(EDX) && !gpr.R(rt).IsSimpleReg(ECX);
		if (needSwap)
			gpr.FlushLockX(EDX);
#else
		const bool needSwap = false;
#endif

		JitSafeMem safe(this, rs, offset);
		OpArg dest;
		if (safe.PrepareWrite(dest, bits / 8))
		{
			if (needSwap)
			{
				MOV(32, R(EDX), gpr.R(rt));
				MOV(bits, dest, R(EDX));
			}
			else {
				if (rt == MIPS_REG_ZERO) {
					switch (bits) {
					case 8: MOV(8, dest, Imm8(0)); break;
					case 16: MOV(16, dest, Imm16(0)); break;
					case 32: MOV(32, dest, Imm32(0)); break;
					}
				} else {
					// The downcast is needed so we don't try to generate an 8-bit write with a 32-bit imm
					// (that might have been generated from an li instruction), which is illegal.
					MOV(bits, dest, DowncastImm(gpr.R(rt), bits));
				}
			}
		}
		if (safe.PrepareSlowWrite())
			safe.DoSlowWrite(safeFunc, gpr.R(rt));
		safe.Finish();

		if (needSwap)
			gpr.UnlockAllX();
		gpr.UnlockAll();
	}

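	// Compiles a lone lwl/lwr/swl/swr that couldn't be fused with its partner into a
	// plain 32-bit access. shiftReg ends up holding the address misalignment in bits.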
	void Jit::CompITypeMemUnpairedLR(MIPSOpcode op, bool isStore)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;

		X64Reg shiftReg = ECX;
		gpr.FlushLockX(ECX, EDX);
#if PPSSPP_ARCH(AMD64)
		// On x64, we need ECX for CL, but it's also the first arg and gets lost. Annoying.
		gpr.FlushLockX(R9);
		shiftReg = R9;
#endif

		gpr.Lock(rt, rs);
		gpr.MapReg(rt, true, !isStore);

		// Grab the offset from alignment for shifting (<< 3 for bytes -> bits.)
		MOV(32, R(shiftReg), gpr.R(rs));
		ADD(32, R(shiftReg), Imm32(offset));
		AND(32, R(shiftReg), Imm32(3));
		SHL(32, R(shiftReg), Imm8(3));

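		// Read the aligned word containing the address into EAX; the Inner helper merges
		// it with rt (and, for stores, leaves the word to write back in EDX).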
		{
			JitSafeMem safe(this, rs, offset, ~3);
			OpArg src;
			if (safe.PrepareRead(src, 4))
			{
				if (!src.IsSimpleReg(EAX))
					MOV(32, R(EAX), src);

				CompITypeMemUnpairedLRInner(op, shiftReg);
			}
			if (safe.PrepareSlowRead(safeMemFuncs.readU32))
				CompITypeMemUnpairedLRInner(op, shiftReg);
			safe.Finish();
		}

		// For store ops, write EDX back to memory.
		if (isStore)
		{
			JitSafeMem safe(this, rs, offset, ~3);
			OpArg dest;
			if (safe.PrepareWrite(dest, 4))
				MOV(32, dest, R(EDX));
			if (safe.PrepareSlowWrite())
				safe.DoSlowWrite(safeMemFuncs.writeU32, R(EDX));
			safe.Finish();
		}

		gpr.UnlockAll();
		gpr.UnlockAllX();
	}

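	// Does the actual masking and shifting for lwl/lwr/swl/swr, given the bit shift in
	// shiftReg and the aligned memory word in EAX. For stores, the combined word to
	// write ends up in EDX.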
	void Jit::CompITypeMemUnpairedLRInner(MIPSOpcode op, X64Reg shiftReg)
	{
		CONDITIONAL_DISABLE(LSU);
		int o = op>>26;
		MIPSGPReg rt = _RT;

		// Make sure we have the shift for the target in ECX.
		if (shiftReg != ECX)
			MOV(32, R(ECX), R(shiftReg));

		// Now use that shift (left on target, right on source.)
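		// Example (little-endian lwl): with addr & 3 == 0 the shift is 0, so rt keeps its
		// low three bytes and only its top byte comes from memory; with addr & 3 == 3 the
		// mask becomes 0 and the whole memory word replaces rt.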
		switch (o)
		{
		case 34: //lwl
			MOV(32, R(EDX), Imm32(0x00ffffff));
			SHR(32, R(EDX), R(CL));
			AND(32, gpr.R(rt), R(EDX));
			break;

		case 38: //lwr
			SHR(32, R(EAX), R(CL));
			break;

		case 42: //swl
			MOV(32, R(EDX), Imm32(0xffffff00));
			SHL(32, R(EDX), R(CL));
			AND(32, R(EAX), R(EDX));
			break;

		case 46: //swr
			MOV(32, R(EDX), gpr.R(rt));
			SHL(32, R(EDX), R(CL));
			// EDX is already the target value to write, but may be overwritten below. Save it.
			PUSH(EDX);
			break;

		default:
			_dbg_assert_msg_(false, "Unsupported left/right load/store instruction.");
		}

		// Flip ECX around from 3 bytes / 24 bits.
		if (shiftReg == ECX)
		{
			MOV(32, R(EDX), Imm32(24));
			SUB(32, R(EDX), R(ECX));
			MOV(32, R(ECX), R(EDX));
		}
		else
		{
			MOV(32, R(ECX), Imm32(24));
			SUB(32, R(ECX), R(shiftReg));
		}

		// Use the flipped shift (left on source, right on target) and write target.
		switch (o)
		{
		case 34: //lwl
			SHL(32, R(EAX), R(CL));

			OR(32, gpr.R(rt), R(EAX));
			break;

		case 38: //lwr
			MOV(32, R(EDX), Imm32(0xffffff00));
			SHL(32, R(EDX), R(CL));
			AND(32, gpr.R(rt), R(EDX));

			OR(32, gpr.R(rt), R(EAX));
			break;

		case 42: //swl
			MOV(32, R(EDX), gpr.R(rt));
			SHR(32, R(EDX), R(CL));

			OR(32, R(EDX), R(EAX));
			break;

		case 46: //swr
			MOV(32, R(EDX), Imm32(0x00ffffff));
			SHR(32, R(EDX), R(CL));
			AND(32, R(EAX), R(EDX));

			// This is the target value we saved earlier.
			POP(EDX);
			OR(32, R(EDX), R(EAX));
			break;

		default:
			_dbg_assert_msg_(false, "Unsupported left/right load/store instruction.");
		}
	}

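	// Compiles plain I-type loads and stores. lwl/lwr (and swl/swr) pairs that cover the
	// same word are fused into a single aligned 32-bit access when possible.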
	void Jit::Comp_ITypeMem(MIPSOpcode op)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rs = _RS;
		MIPSGPReg rt = _RT;
		int o = op>>26;
		if (((op >> 29) & 1) == 0 && rt == MIPS_REG_ZERO) {
			// Don't load anything into $zr
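			// (Bit 29 of the opcode is clear for loads and set for stores in this range.)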
			return;
		}

		CheckMemoryBreakpoint(0, rs, offset);

		switch (o)
		{
		case 37: //R(rt) = ReadMem16(addr); break; //lhu
			CompITypeMemRead(op, 16, &XEmitter::MOVZX, safeMemFuncs.readU16);
			break;

		case 36: //R(rt) = ReadMem8 (addr); break; //lbu
			CompITypeMemRead(op, 8, &XEmitter::MOVZX, safeMemFuncs.readU8);
			break;

		case 35: //R(rt) = ReadMem32(addr); break; //lw
			CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
			break;

		case 32: //R(rt) = SignExtend8ToU32 (ReadMem8 (addr)); break; //lb
			CompITypeMemRead(op, 8, &XEmitter::MOVSX, safeMemFuncs.readU8);
			break;

		case 33: //R(rt) = SignExtend16ToU32(ReadMem16(addr)); break; //lh
			CompITypeMemRead(op, 16, &XEmitter::MOVSX, safeMemFuncs.readU16);
			break;

		case 40: //WriteMem8 (addr, R(rt)); break; //sb
			CompITypeMemWrite(op, 8, safeMemFuncs.writeU8);
			break;

		case 41: //WriteMem16(addr, R(rt)); break; //sh
			CompITypeMemWrite(op, 16, safeMemFuncs.writeU16);
			break;

		case 43: //WriteMem32(addr, R(rt)); break; //sw
			CompITypeMemWrite(op, 32, safeMemFuncs.writeU32);
			break;

		case 34: //lwl
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for lwr rd, offset-3(rs) which makes a pair.
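				// Same rs/rt fields (op & 0xFFFF0000), opcode bumped from lwl (34) to lwr (38)
				// by adding 4 << 26, and the immediate set to offset - 3 (the other end of the word).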
				u32 desiredOp = ((op & 0xFFFF0000) + (4 << 26)) + (offset - 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					CheckMemoryBreakpoint(1, rs, offset - 3);
					EatInstruction(nextOp);
					// nextOp has the correct address.
					CompITypeMemRead(nextOp, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
				}
				else
					CompITypeMemUnpairedLR(op, false);
			}
			break;

		case 38: //lwr
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for lwl rd, offset+3(rs) which makes a pair.
				u32 desiredOp = ((op & 0xFFFF0000) - (4 << 26)) + (offset + 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					CheckMemoryBreakpoint(1, rs, offset + 3);
					EatInstruction(nextOp);
					// op has the correct address.
					CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
				}
				else
					CompITypeMemUnpairedLR(op, false);
			}
			break;

		case 42: //swl
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for swr rd, offset-3(rs) which makes a pair.
				u32 desiredOp = ((op & 0xFFFF0000) + (4 << 26)) + (offset - 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					CheckMemoryBreakpoint(1, rs, offset - 3);
					EatInstruction(nextOp);
					// nextOp has the correct address.
					CompITypeMemWrite(nextOp, 32, safeMemFuncs.writeU32);
				}
				else
					CompITypeMemUnpairedLR(op, true);
			}
			break;

		case 46: //swr
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for swl rd, offset+3(rs) which makes a pair.
				u32 desiredOp = ((op & 0xFFFF0000) - (4 << 26)) + (offset + 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					CheckMemoryBreakpoint(1, rs, offset + 3);
					EatInstruction(nextOp);
					// op has the correct address.
					CompITypeMemWrite(op, 32, safeMemFuncs.writeU32);
				}
				else
					CompITypeMemUnpairedLR(op, true);
			}
			break;

		default:
			Comp_Generic(op);
			return;
		}

	}

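	// ll/sc: ll loads the word and sets mips->llBit; sc performs the store only if llBit
	// is still set, then writes 1 (success) or 0 (failure) into rt.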
	void Jit::Comp_StoreSync(MIPSOpcode op) {
		CONDITIONAL_DISABLE(LSU);

		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;
		// Note: still does something even if loading to zero.

		CheckMemoryBreakpoint(0, rs, offset);

		FixupBranch skipStore;
		FixupBranch finish;
		switch (op >> 26) {
		case 48: // ll
			CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
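			// CTXREG points 128 bytes into MIPSState (presumably to make better use of
			// disp8 addressing), hence the -128 bias here.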
			MOV(8, MDisp(X64JitConstants::CTXREG, -128 + offsetof(MIPSState, llBit)), Imm8(1));
			break;

		case 56: // sc
			// Map before the jump in case any regs spill. Unlock happens inside CompITypeMemWrite().
			// This is not a very common op, but it's in jit so memory breakpoints can trip.
			gpr.Lock(rt, rs);
			gpr.MapReg(rt, true, true);
			gpr.MapReg(rs, true, false);

			CMP(8, MDisp(X64JitConstants::CTXREG, -128 + offsetof(MIPSState, llBit)), Imm8(1));
			skipStore = J_CC(CC_NE);

			CompITypeMemWrite(op, 32, safeMemFuncs.writeU32, true);
			MOV(32, gpr.R(rt), Imm32(1));
			finish = J();

			SetJumpTarget(skipStore);
			MOV(32, gpr.R(rt), Imm32(0));
			SetJumpTarget(finish);
			break;

		default:
			INVALIDOP;
		}
	}

	void Jit::Comp_Cache(MIPSOpcode op) {
		CONDITIONAL_DISABLE(LSU);

		int func = (op >> 16) & 0x1F;

		// See Int_Cache for the definitions.
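		// Funcs 24/25/27/30 appear to be dcache hint/writeback/invalidate variants the
		// emulator can safely ignore; anything else (notably icache ops) falls back to the
		// interpreter.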
		switch (func) {
		case 24: break;
		case 25: break;
		case 27: break;
		case 30: break;
		default:
			// Fall back to the interpreter.
			DISABLE;
		}
	}
}

#endif // PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)