GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/MIPS/IR/IRRegCache.cpp

// Copyright (c) 2023- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#ifndef offsetof
#include <cstddef>
#endif

#include <cstring>
#include "Common/Log.h"
#include "Common/LogReporting.h"
#include "Core/MemMap.h"
#include "Core/MIPS/IR/IRAnalysis.h"
#include "Core/MIPS/IR/IRRegCache.h"
#include "Core/MIPS/IR/IRInst.h"
#include "Core/MIPS/IR/IRJit.h"
#include "Core/MIPS/JitCommon/JitState.h"

void IRImmRegCache::Flush(IRReg rd) {
	if (isImm_[rd]) {
		if (rd == 0) {
			return;
		}
		_assert_((rd > 0 && rd < 32) || (rd >= IRTEMP_0 && rd < IRREG_VFPU_CTRL_BASE));
		ir_->WriteSetConstant(rd, immVal_[rd]);
		isImm_[rd] = false;
	}
}

void IRImmRegCache::Discard(IRReg rd) {
	if (rd == 0) {
		return;
	}
	isImm_[rd] = false;
}

IRImmRegCache::IRImmRegCache(IRWriter *ir) : ir_(ir) {
	memset(&isImm_, 0, sizeof(isImm_));
	memset(&immVal_, 0, sizeof(immVal_));
	isImm_[0] = true;
}

void IRImmRegCache::FlushAll() {
	for (int i = 1; i < TOTAL_MAPPABLE_IRREGS; ) {
		if (isImm_[i]) {
			Flush(i);
		}

		// Most regs are not imms most of the time, so skip straight to the
		// next one that is. This speeds it up a lot.
		bool *next = (bool *)memchr(&isImm_[i], 1, TOTAL_MAPPABLE_IRREGS - i);
		if (!next)
			break;
		i = (int)(next - &isImm_[0]);
	}
}

void IRImmRegCache::MapIn(IRReg rd) {
	Flush(rd);
}

void IRImmRegCache::MapDirty(IRReg rd) {
	Discard(rd);
}

void IRImmRegCache::MapInIn(IRReg rs, IRReg rt) {
	Flush(rs);
	Flush(rt);
}

void IRImmRegCache::MapInInIn(IRReg rd, IRReg rs, IRReg rt) {
	Flush(rd);
	Flush(rs);
	Flush(rt);
}

void IRImmRegCache::MapDirtyIn(IRReg rd, IRReg rs) {
	if (rs != rd) {
		Discard(rd);
	}
	Flush(rs);
}

void IRImmRegCache::MapDirtyInIn(IRReg rd, IRReg rs, IRReg rt) {
	if (rs != rd && rt != rd) {
		Discard(rd);
	}
	Flush(rs);
	Flush(rt);
}
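
// A minimal usage sketch (hypothetical caller, not part of this file): the
// contract above is that "In" operands get any pending constant written into
// the IR stream before use, while a pure "Dirty" destination simply drops its
// cached imm. Lowering "rd = rs + rt" might look like:
//
//   gpr.MapDirtyInIn(rd, rs, rt);   // flush rs/rt imms, discard rd's imm
//   ir->Write(IROp::Add, rd, rs, rt);
//
// Note how MapDirtyIn/MapDirtyInIn skip the discard when rd aliases a source,
// so the aliased source's constant still gets flushed before the overwrite.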

IRNativeRegCacheBase::IRNativeRegCacheBase(MIPSComp::JitOptions *jo)
	: jo_(jo) {}

void IRNativeRegCacheBase::Start(MIPSComp::IRBlockCache *irBlockCache, int blockNum) {
	if (!initialReady_) {
		SetupInitialRegs();
		initialReady_ = true;
	}

	memcpy(nr, nrInitial_, sizeof(nr[0]) * config_.totalNativeRegs);
	memcpy(mr, mrInitial_, sizeof(mr));

	irBlock_ = irBlockCache->GetBlock(blockNum);

	int numStatics;
	const StaticAllocation *statics = GetStaticAllocations(numStatics);
	for (int i = 0; i < numStatics; i++) {
		nr[statics[i].nr].mipsReg = statics[i].mr;
		nr[statics[i].nr].pointerified = statics[i].pointerified && jo_->enablePointerify;
		nr[statics[i].nr].normalized32 = statics[i].normalized32;
		mr[statics[i].mr].loc = statics[i].loc;
		mr[statics[i].mr].nReg = statics[i].nr;
		mr[statics[i].mr].isStatic = true;
		// Lock it until the very end.
		mr[statics[i].mr].spillLockIRIndex = irBlock_->GetNumIRInstructions();
	}

	irBlockNum_ = blockNum;
	irBlockCache_ = irBlockCache;
	irIndex_ = 0;
}
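
// Sketch of what a backend might return from GetStaticAllocations() (field
// order and register names here are invented for illustration): each entry
// pins one IR reg to one native reg for the whole block, and Start() then
// spill-locks it through the final IR instruction so it is never evicted.
//
//   static const StaticAllocation allocs[] = {
//       { MIPS_REG_SP, NATIVE_REG_8, MIPSLoc::REG, /*pointerified=*/true },
//       { MIPS_REG_RA, NATIVE_REG_9, MIPSLoc::REG },
//   };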

void IRNativeRegCacheBase::SetupInitialRegs() {
	_assert_msg_(config_.totalNativeRegs > 0, "totalNativeRegs was never set by backend");

	// Everything else is initialized in the struct.
	mrInitial_[MIPS_REG_ZERO].loc = MIPSLoc::IMM;
	mrInitial_[MIPS_REG_ZERO].imm = 0;
}

bool IRNativeRegCacheBase::IsGPRInRAM(IRReg gpr) {
	_dbg_assert_(IsValidGPR(gpr));
	return mr[gpr].loc == MIPSLoc::MEM;
}

bool IRNativeRegCacheBase::IsFPRInRAM(IRReg fpr) {
	_dbg_assert_(IsValidFPR(fpr));
	return mr[fpr + 32].loc == MIPSLoc::MEM;
}

bool IRNativeRegCacheBase::IsGPRMapped(IRReg gpr) {
	_dbg_assert_(IsValidGPR(gpr));
	return mr[gpr].loc == MIPSLoc::REG || mr[gpr].loc == MIPSLoc::REG_IMM;
}

bool IRNativeRegCacheBase::IsFPRMapped(IRReg fpr) {
	_dbg_assert_(IsValidFPR(fpr));
	return mr[fpr + 32].loc == MIPSLoc::FREG || mr[fpr + 32].loc == MIPSLoc::VREG;
}

int IRNativeRegCacheBase::GetFPRLaneCount(IRReg fpr) {
	if (!IsFPRMapped(fpr))
		return 0;
	if (mr[fpr + 32].lane == -1)
		return 1;

	IRReg base = fpr + 32 - mr[fpr + 32].lane;
	int c = 1;
	for (int i = 1; i < 4; ++i) {
		if (mr[base + i].nReg != mr[base].nReg || mr[base + i].loc != mr[base].loc)
			return c;
		if (mr[base + i].lane != i)
			return c;

		c++;
	}

	return c;
}
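
// Example of the lane bookkeeping above: a Vec4 at f4..f7 held in one SIMD
// register has mr[36].lane == 0 through mr[39].lane == 3, all four sharing
// the same nReg, so GetFPRLaneCount(f4) == 4. A scalar mapping instead has
// lane == -1 and reports a count of 1.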

int IRNativeRegCacheBase::GetFPRLane(IRReg fpr) {
	_dbg_assert_(IsValidFPR(fpr));
	if (mr[fpr + 32].loc == MIPSLoc::FREG || mr[fpr + 32].loc == MIPSLoc::VREG) {
		int l = mr[fpr + 32].lane;
		return l == -1 ? 0 : l;
	}
	return -1;
}

bool IRNativeRegCacheBase::IsGPRMappedAsPointer(IRReg gpr) {
	_dbg_assert_(IsValidGPR(gpr));
	if (mr[gpr].loc == MIPSLoc::REG) {
		return nr[mr[gpr].nReg].pointerified;
	} else if (mr[gpr].loc == MIPSLoc::REG_IMM) {
		_assert_msg_(!nr[mr[gpr].nReg].pointerified, "Really shouldn't be pointerified here");
	} else if (mr[gpr].loc == MIPSLoc::REG_AS_PTR) {
		return true;
	}
	return false;
}

bool IRNativeRegCacheBase::IsGPRMappedAsStaticPointer(IRReg gpr) {
	if (IsGPRMappedAsPointer(gpr)) {
		return mr[gpr].isStatic;
	}
	return false;
}

bool IRNativeRegCacheBase::IsGPRImm(IRReg gpr) {
	_dbg_assert_(IsValidGPR(gpr));
	if (gpr == MIPS_REG_ZERO)
		return true;
	return mr[gpr].loc == MIPSLoc::IMM || mr[gpr].loc == MIPSLoc::REG_IMM;
}

bool IRNativeRegCacheBase::IsGPR2Imm(IRReg base) {
	return IsGPRImm(base) && IsGPRImm(base + 1);
}

uint32_t IRNativeRegCacheBase::GetGPRImm(IRReg gpr) {
	_dbg_assert_(IsValidGPR(gpr));
	if (gpr == MIPS_REG_ZERO)
		return 0;
	_assert_msg_(mr[gpr].loc == MIPSLoc::IMM || mr[gpr].loc == MIPSLoc::REG_IMM, "GPR %d not in an imm", gpr);
	return mr[gpr].imm;
}

uint64_t IRNativeRegCacheBase::GetGPR2Imm(IRReg base) {
	return (uint64_t)GetGPRImm(base) | ((uint64_t)GetGPRImm(base + 1) << 32);
}

void IRNativeRegCacheBase::SetGPRImm(IRReg gpr, uint32_t immVal) {
	_dbg_assert_(IsValidGPR(gpr));
	if (gpr == MIPS_REG_ZERO && immVal != 0) {
		ERROR_LOG_REPORT(Log::JIT, "Trying to set immediate %08x to r0", immVal);
		return;
	}

	if (mr[gpr].loc == MIPSLoc::REG_IMM && mr[gpr].imm == immVal) {
		// Already have that value, let's keep it in the reg.
		return;
	}

	if (mr[gpr].nReg != -1) {
		// Zap existing value if cached in a reg.
		_assert_msg_(mr[gpr].lane == -1, "Should not be a multilane reg");
		DiscardNativeReg(mr[gpr].nReg);
	}

	mr[gpr].loc = MIPSLoc::IMM;
	mr[gpr].imm = immVal;
}

void IRNativeRegCacheBase::SetGPR2Imm(IRReg base, uint64_t immVal) {
	_dbg_assert_(IsValidGPRNoZero(base));
	uint32_t imm0 = (uint32_t)(immVal & 0xFFFFFFFF);
	uint32_t imm1 = (uint32_t)(immVal >> 32);

	if (IsGPRImm(base) && IsGPRImm(base + 1) && GetGPRImm(base) == imm0 && GetGPRImm(base + 1) == imm1) {
		// Already set to this, don't bother.
		return;
	}

	if (mr[base].nReg != -1) {
		// Zap existing value if cached in a reg.
		DiscardNativeReg(mr[base].nReg);
		if (mr[base + 1].nReg != -1)
			DiscardNativeReg(mr[base + 1].nReg);
	}

	mr[base].loc = MIPSLoc::IMM;
	mr[base].imm = imm0;
	mr[base + 1].loc = MIPSLoc::IMM;
	mr[base + 1].imm = imm1;
}
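
// Worked example of the 64-bit pairing used for HI/LO-style pairs:
// SetGPR2Imm(base, 0x1122334455667788) stores imm0 = 0x55667788 in `base`
// and imm1 = 0x11223344 in `base + 1`; GetGPR2Imm(base) then reassembles
// imm0 | (imm1 << 32) back into the original value.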

void IRNativeRegCacheBase::SpillLockGPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
	_dbg_assert_(IsValidGPR(r1));
	_dbg_assert_(r2 == IRREG_INVALID || IsValidGPR(r2));
	_dbg_assert_(r3 == IRREG_INVALID || IsValidGPR(r3));
	_dbg_assert_(r4 == IRREG_INVALID || IsValidGPR(r4));
	SetSpillLockIRIndex(r1, r2, r3, r4, 0, irIndex_);
}

void IRNativeRegCacheBase::SpillLockFPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
	_dbg_assert_(IsValidFPR(r1));
	_dbg_assert_(r2 == IRREG_INVALID || IsValidFPR(r2));
	_dbg_assert_(r3 == IRREG_INVALID || IsValidFPR(r3));
	_dbg_assert_(r4 == IRREG_INVALID || IsValidFPR(r4));
	SetSpillLockIRIndex(r1, r2, r3, r4, 32, irIndex_);
}

void IRNativeRegCacheBase::ReleaseSpillLockGPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
	_dbg_assert_(IsValidGPR(r1));
	_dbg_assert_(r2 == IRREG_INVALID || IsValidGPR(r2));
	_dbg_assert_(r3 == IRREG_INVALID || IsValidGPR(r3));
	_dbg_assert_(r4 == IRREG_INVALID || IsValidGPR(r4));
	SetSpillLockIRIndex(r1, r2, r3, r4, 0, -1);
}

void IRNativeRegCacheBase::ReleaseSpillLockFPR(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
	_dbg_assert_(IsValidFPR(r1));
	_dbg_assert_(r2 == IRREG_INVALID || IsValidFPR(r2));
	_dbg_assert_(r3 == IRREG_INVALID || IsValidFPR(r3));
	_dbg_assert_(r4 == IRREG_INVALID || IsValidFPR(r4));
	SetSpillLockIRIndex(r1, r2, r3, r4, 32, -1);
}

void IRNativeRegCacheBase::SetSpillLockIRIndex(IRReg r1, IRReg r2, IRReg r3, IRReg r4, int offset, int index) {
	if (!mr[r1 + offset].isStatic)
		mr[r1 + offset].spillLockIRIndex = index;
	if (r2 != IRREG_INVALID && !mr[r2 + offset].isStatic)
		mr[r2 + offset].spillLockIRIndex = index;
	if (r3 != IRREG_INVALID && !mr[r3 + offset].isStatic)
		mr[r3 + offset].spillLockIRIndex = index;
	if (r4 != IRREG_INVALID && !mr[r4 + offset].isStatic)
		mr[r4 + offset].spillLockIRIndex = index;
}

void IRNativeRegCacheBase::SetSpillLockIRIndex(IRReg r1, int index) {
	if (!mr[r1].isStatic)
		mr[r1].spillLockIRIndex = index;
}
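
// Typical pattern (hypothetical backend snippet, not from this file): pin the
// regs an op is working on, grab scratch space while they're pinned, release.
//
//   SpillLockGPR(rd, rs);
//   IRNativeReg temp = AllocateReg(MIPSLoc::REG, MIPSMap::INIT);
//   // ... emit code using rd, rs, temp ...
//   ReleaseSpillLockGPR(rd, rs);
//
// While spillLockIRIndex >= irIndex_, FindBestToSpill() below refuses to
// evict the register, so locked regs survive the AllocateReg() call.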

void IRNativeRegCacheBase::MarkGPRDirty(IRReg gpr, bool andNormalized32) {
	_assert_(IsGPRMapped(gpr));
	if (!IsGPRMapped(gpr))
		return;

	IRNativeReg nreg = mr[gpr].nReg;
	nr[nreg].isDirty = true;
	nr[nreg].normalized32 = andNormalized32;
	// If reg is written to, pointerification is assumed lost.
	nr[nreg].pointerified = false;
	if (mr[gpr].loc == MIPSLoc::REG_AS_PTR || mr[gpr].loc == MIPSLoc::REG_IMM) {
		mr[gpr].loc = MIPSLoc::REG;
		mr[gpr].imm = -1;
	}
	_dbg_assert_(mr[gpr].loc == MIPSLoc::REG);
}

void IRNativeRegCacheBase::MarkGPRAsPointerDirty(IRReg gpr) {
	_assert_(IsGPRMappedAsPointer(gpr));
	if (!IsGPRMappedAsPointer(gpr))
		return;

#ifdef MASKED_PSP_MEMORY
	if (mr[gpr].loc == MIPSLoc::REG_AS_PTR) {
		_assert_msg_(false, "MarkGPRAsPointerDirty is not possible when using MASKED_PSP_MEMORY");
	}
#endif

	IRNativeReg nreg = mr[gpr].nReg;
	_dbg_assert_(!nr[nreg].normalized32);
	nr[nreg].isDirty = true;
	// Stays pointerified or REG_AS_PTR.
}

IRNativeReg IRNativeRegCacheBase::AllocateReg(MIPSLoc type, MIPSMap flags) {
	_dbg_assert_(type == MIPSLoc::REG || type == MIPSLoc::FREG || type == MIPSLoc::VREG);

	IRNativeReg nreg = FindFreeReg(type, flags);
	if (nreg != -1)
		return nreg;

	// Still nothing. Let's spill a reg and goto 10.
	bool clobbered;
	IRNativeReg bestToSpill = FindBestToSpill(type, flags, true, &clobbered);
	if (bestToSpill == -1) {
		bestToSpill = FindBestToSpill(type, flags, false, &clobbered);
	}

	if (bestToSpill != -1) {
		if (clobbered) {
			DiscardNativeReg(bestToSpill);
		} else {
			FlushNativeReg(bestToSpill);
		}
		// Now one must be free.
		return FindFreeReg(type, flags);
	}

	// Uh oh, we have all of them spill-locked...
	ERROR_LOG_REPORT(Log::JIT, "Out of spillable registers in block PC %08x, index %d", irBlock_->GetOriginalStart(), irIndex_);
	_assert_(bestToSpill != -1);
	return -1;
}

IRNativeReg IRNativeRegCacheBase::FindFreeReg(MIPSLoc type, MIPSMap flags) const {
	int allocCount = 0, base = 0;
	const int *allocOrder = GetAllocationOrder(type, flags, allocCount, base);

	for (int i = 0; i < allocCount; i++) {
		IRNativeReg nreg = IRNativeReg(allocOrder[i] - base);

		if (nr[nreg].mipsReg == IRREG_INVALID && nr[nreg].tempLockIRIndex < irIndex_) {
			return nreg;
		}
	}

	return -1;
}

bool IRNativeRegCacheBase::IsGPRClobbered(IRReg gpr) const {
	_dbg_assert_(IsValidGPR(gpr));
	return IsRegClobbered(MIPSLoc::REG, gpr);
}

bool IRNativeRegCacheBase::IsFPRClobbered(IRReg fpr) const {
	_dbg_assert_(IsValidFPR(fpr));
	return IsRegClobbered(MIPSLoc::FREG, fpr + 32);
}

IRUsage IRNativeRegCacheBase::GetNextRegUsage(const IRSituation &info, MIPSLoc type, IRReg r) const {
	if (type == MIPSLoc::REG)
		return IRNextGPRUsage(r, info);
	else if (type == MIPSLoc::FREG || type == MIPSLoc::VREG)
		return IRNextFPRUsage(r - 32, info);
	_assert_msg_(false, "Unknown spill allocation type");
	return IRUsage::UNKNOWN;
}

bool IRNativeRegCacheBase::IsRegClobbered(MIPSLoc type, IRReg r) const {
	static const int UNUSED_LOOKAHEAD_OPS = 30;

	IRSituation info;
	info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;
	// We look starting one ahead, unlike spilling. We want to know if it clobbers later.
	info.currentIndex = irIndex_ + 1;
	info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);
	info.numInstructions = irBlock_->GetNumIRInstructions();

	// Make sure we're on the first one if this is multi-lane.
	IRReg first = r;
	if (mr[r].lane != -1)
		first -= mr[r].lane;

	IRUsage usage = GetNextRegUsage(info, type, first);
	if (usage == IRUsage::CLOBBERED) {
		// If multiple mips regs use this native reg (i.e. vector, HI/LO), check each.
		bool canClobber = true;
		for (IRReg m = first + 1; m < IRREG_INVALID && mr[m].nReg == mr[first].nReg && canClobber; ++m)
			canClobber = GetNextRegUsage(info, type, m) == IRUsage::CLOBBERED;

		return canClobber;
	}
	return false;
}

bool IRNativeRegCacheBase::IsRegRead(MIPSLoc type, IRReg first) const {
	static const int UNUSED_LOOKAHEAD_OPS = 30;

	IRSituation info;
	info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;
	// We look starting one ahead, unlike spilling.
	info.currentIndex = irIndex_ + 1;
	info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);
	info.numInstructions = irBlock_->GetNumIRInstructions();

	// Note: this intentionally doesn't look at the full reg, only the lane.
	IRUsage usage = GetNextRegUsage(info, type, first);
	return usage == IRUsage::READ;
}

IRNativeReg IRNativeRegCacheBase::FindBestToSpill(MIPSLoc type, MIPSMap flags, bool unusedOnly, bool *clobbered) const {
	int allocCount = 0, base = 0;
	const int *allocOrder = GetAllocationOrder(type, flags, allocCount, base);

	static const int UNUSED_LOOKAHEAD_OPS = 30;

	IRSituation info;
	info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;
	info.currentIndex = irIndex_;
	info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);
	info.numInstructions = irBlock_->GetNumIRInstructions();

	*clobbered = false;
	for (int i = 0; i < allocCount; i++) {
		IRNativeReg nreg = IRNativeReg(allocOrder[i] - base);
		if (nr[nreg].mipsReg != IRREG_INVALID && mr[nr[nreg].mipsReg].spillLockIRIndex >= irIndex_)
			continue;
		if (nr[nreg].tempLockIRIndex >= irIndex_)
			continue;

		// As it's in alloc-order, we know it's not static so we don't need to check for that.
		IRReg mipsReg = nr[nreg].mipsReg;
		IRUsage usage = GetNextRegUsage(info, type, mipsReg);

		// Awesome, a clobbered reg. Let's use it?
		if (usage == IRUsage::CLOBBERED) {
			// If multiple mips regs use this native reg (i.e. vector, HI/LO), check each.
			// Note: mipsReg points to the lowest numbered IRReg.
			bool canClobber = true;
			for (IRReg m = mipsReg + 1; m < IRREG_INVALID && mr[m].nReg == nreg && canClobber; ++m)
				canClobber = GetNextRegUsage(info, type, m) == IRUsage::CLOBBERED;

			// Okay, if all can be clobbered, we're good to go.
			if (canClobber) {
				*clobbered = true;
				return nreg;
			}
		}

		// Not awesome. A used reg. Let's try to avoid spilling.
		if (!unusedOnly || usage == IRUsage::UNUSED) {
			// TODO: Use age or something to choose which register to spill?
			// TODO: Spill dirty regs first? or opposite?
			*clobbered = mipsReg == MIPS_REG_ZERO;
			return nreg;
		}
	}

	return -1;
}
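
// Example of the two-pass policy with UNUSED_LOOKAHEAD_OPS = 30: if every
// lane of a candidate is overwritten within the next 30 IR instructions
// (IRUsage::CLOBBERED), its native reg is taken with no store at all. With
// unusedOnly == true only clobbered/unused regs are eligible; AllocateReg()
// above falls back to a second unusedOnly == false pass, which may spill a
// reg that will be read again soon.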

bool IRNativeRegCacheBase::IsNativeRegCompatible(IRNativeReg nreg, MIPSLoc type, MIPSMap flags, int lanes) {
	int allocCount = 0, base = 0;
	const int *allocOrder = GetAllocationOrder(type, flags, allocCount, base);

	for (int i = 0; i < allocCount; i++) {
		IRNativeReg allocReg = IRNativeReg(allocOrder[i] - base);
		if (allocReg == nreg)
			return true;
	}

	return false;
}

bool IRNativeRegCacheBase::TransferNativeReg(IRNativeReg nreg, IRNativeReg dest, MIPSLoc type, IRReg first, int lanes, MIPSMap flags) {
	// To be overridden if the backend supports transfers.
	return false;
}

void IRNativeRegCacheBase::DiscardNativeReg(IRNativeReg nreg) {
	_assert_msg_(nreg >= 0 && nreg < config_.totalNativeRegs, "DiscardNativeReg on invalid register %d", nreg);
	if (nr[nreg].mipsReg != IRREG_INVALID) {
		int8_t lanes = 0;
		for (IRReg m = nr[nreg].mipsReg; m < IRREG_INVALID && mr[m].nReg == nreg; ++m)
			lanes++;

		if (mr[nr[nreg].mipsReg].isStatic) {
			_assert_(nr[nreg].mipsReg != MIPS_REG_ZERO);

			int numStatics;
			const StaticAllocation *statics = GetStaticAllocations(numStatics);

			// Discarding a static reg just returns it to its default location.
			for (IRReg m = nr[nreg].mipsReg; m < nr[nreg].mipsReg + lanes; ++m) {
				_assert_msg_(mr[m].isStatic, "Reg in lane %d mismatched static status", m - nr[nreg].mipsReg);
				for (int i = 0; i < numStatics; i++) {
					if (m == statics[i].mr)
						mr[m].loc = statics[i].loc;
				}
			}
		} else {
			for (IRReg m = nr[nreg].mipsReg; m < nr[nreg].mipsReg + lanes; ++m) {
				mr[m].loc = MIPSLoc::MEM;
				mr[m].nReg = -1;
				mr[m].imm = 0;
				mr[m].lane = -1;
				_assert_msg_(!mr[m].isStatic, "Reg in lane %d mismatched static status", m - nr[nreg].mipsReg);
			}

			nr[nreg].mipsReg = IRREG_INVALID;
		}
	}

	// Even for a static reg, we assume this means it's not pointerified anymore.
	nr[nreg].pointerified = false;
	nr[nreg].isDirty = false;
	nr[nreg].normalized32 = false;
}

void IRNativeRegCacheBase::FlushNativeReg(IRNativeReg nreg) {
	_assert_msg_(nreg >= 0 && nreg < config_.totalNativeRegs, "FlushNativeReg on invalid register %d", nreg);
	if (nr[nreg].mipsReg == IRREG_INVALID || nr[nreg].mipsReg == MIPS_REG_ZERO) {
		// Nothing to do, reg not mapped or mapped to fixed zero.
		_dbg_assert_(!nr[nreg].isDirty);
		return;
	}
	_dbg_assert_(!mr[nr[nreg].mipsReg].isStatic);
	if (mr[nr[nreg].mipsReg].isStatic) {
		ERROR_LOG(Log::JIT, "Cannot FlushNativeReg a statically mapped register");
		return;
	}

	// Multiple mipsRegs may match this if a vector or HI/LO, etc.
	bool isDirty = nr[nreg].isDirty;
	int8_t lanes = 0;
	for (IRReg m = nr[nreg].mipsReg; m < IRREG_INVALID && mr[m].nReg == nreg; ++m) {
		_assert_(!mr[m].isStatic);
		// If we're flushing a native reg, better not be partially in mem or an imm.
		_assert_(mr[m].loc != MIPSLoc::MEM && mr[m].loc != MIPSLoc::IMM);
		lanes++;
	}

	if (isDirty) {
		IRReg first = nr[nreg].mipsReg;
		if (mr[first].loc == MIPSLoc::REG_AS_PTR) {
			// We assume this can't be multiple lanes. Maybe some gather craziness?
			_assert_(lanes == 1);
			AdjustNativeRegAsPtr(nreg, false);
			mr[first].loc = MIPSLoc::REG;
		}
		StoreNativeReg(nreg, first, lanes);
	}

	for (int8_t i = 0; i < lanes; ++i) {
		auto &mreg = mr[nr[nreg].mipsReg + i];
		mreg.nReg = -1;
		// Note that it loses its imm status, because imms are always dirty.
		mreg.loc = MIPSLoc::MEM;
		mreg.imm = 0;
		mreg.lane = -1;
	}

	nr[nreg].mipsReg = IRREG_INVALID;
	nr[nreg].isDirty = false;
	nr[nreg].pointerified = false;
	nr[nreg].normalized32 = false;
}
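
// Discard vs. flush, in short: DiscardNativeReg() throws the native value
// away, so it is only safe when the guest value is dead or already in memory;
// FlushNativeReg() stores a dirty value back first (undoing REG_AS_PTR if
// needed), then unmaps. Either way the nreg ends up clean, unpointerified,
// and not normalized.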

void IRNativeRegCacheBase::DiscardReg(IRReg mreg) {
	if (mr[mreg].isStatic) {
		DiscardNativeReg(mr[mreg].nReg);
		return;
	}
	switch (mr[mreg].loc) {
	case MIPSLoc::IMM:
		if (mreg != MIPS_REG_ZERO) {
			mr[mreg].loc = MIPSLoc::MEM;
			mr[mreg].imm = 0;
		}
		break;

	case MIPSLoc::REG:
	case MIPSLoc::REG_AS_PTR:
	case MIPSLoc::REG_IMM:
	case MIPSLoc::FREG:
	case MIPSLoc::VREG:
		DiscardNativeReg(mr[mreg].nReg);
		break;

	case MIPSLoc::MEM:
		// Already discarded.
		break;
	}
	mr[mreg].spillLockIRIndex = -1;
}

void IRNativeRegCacheBase::FlushReg(IRReg mreg) {
	_assert_msg_(!mr[mreg].isStatic, "Cannot flush static reg %d", mreg);

	switch (mr[mreg].loc) {
	case MIPSLoc::IMM:
		// IMM is always "dirty".
		StoreRegValue(mreg, mr[mreg].imm);
		mr[mreg].loc = MIPSLoc::MEM;
		mr[mreg].nReg = -1;
		mr[mreg].imm = 0;
		break;

	case MIPSLoc::REG:
	case MIPSLoc::REG_IMM:
	case MIPSLoc::REG_AS_PTR:
	case MIPSLoc::FREG:
	case MIPSLoc::VREG:
		// Might be in a native reg with multiple IR regs, flush together.
		FlushNativeReg(mr[mreg].nReg);
		break;

	case MIPSLoc::MEM:
		// Already there, nothing to do.
		break;
	}
}

void IRNativeRegCacheBase::FlushAll(bool gprs, bool fprs) {
	// Note: make sure not to change the registers when flushing.
	// Branching code may expect the native reg to retain its value.

	if (!mr[MIPS_REG_ZERO].isStatic && mr[MIPS_REG_ZERO].nReg != -1)
		DiscardNativeReg(mr[MIPS_REG_ZERO].nReg);

	for (int i = 1; i < TOTAL_MAPPABLE_IRREGS; i++) {
		IRReg mipsReg = (IRReg)i;
		if (!fprs && i >= 32 && IsValidFPR(mipsReg - 32))
			continue;
		if (!gprs && IsValidGPR(mipsReg))
			continue;

		if (mr[i].isStatic) {
			IRNativeReg nreg = mr[i].nReg;
			// Cannot leave any IMMs in registers, not even MIPSLoc::REG_IMM.
			// Can confuse the regalloc later if this flush is mid-block
			// due to an interpreter fallback that changes the register.
			if (mr[i].loc == MIPSLoc::IMM) {
				SetNativeRegValue(mr[i].nReg, mr[i].imm);
				_assert_(IsValidGPR(mipsReg));
				mr[i].loc = MIPSLoc::REG;
				nr[nreg].pointerified = false;
			} else if (mr[i].loc == MIPSLoc::REG_IMM) {
				// The register already contains the immediate.
				if (nr[nreg].pointerified) {
					ERROR_LOG(Log::JIT, "REG_IMM but pointerified. Wrong.");
					nr[nreg].pointerified = false;
				}
				mr[i].loc = MIPSLoc::REG;
			} else if (mr[i].loc == MIPSLoc::REG_AS_PTR) {
				AdjustNativeRegAsPtr(mr[i].nReg, false);
				mr[i].loc = MIPSLoc::REG;
			}
			_assert_(mr[i].nReg != -1);
		} else if (mr[i].loc != MIPSLoc::MEM) {
			FlushReg(mipsReg);
		}
	}

	int count = 0;
	const StaticAllocation *allocs = GetStaticAllocations(count);
	for (int i = 0; i < count; i++) {
		if (!fprs && allocs[i].loc != MIPSLoc::FREG && allocs[i].loc != MIPSLoc::VREG)
			continue;
		if (!gprs && allocs[i].loc != MIPSLoc::REG)
			continue;
		if (allocs[i].pointerified && !nr[allocs[i].nr].pointerified && jo_->enablePointerify) {
			// Re-pointerify
			if (mr[allocs[i].mr].loc == MIPSLoc::REG_IMM)
				mr[allocs[i].mr].loc = MIPSLoc::REG;
			_dbg_assert_(mr[allocs[i].mr].loc == MIPSLoc::REG);
			AdjustNativeRegAsPtr(allocs[i].nr, true);
			nr[allocs[i].nr].pointerified = true;
		} else if (!allocs[i].pointerified) {
			// If this register got pointerified on the way, mark it as not.
			// This is so that after save/reload (like in an interpreter fallback),
			// it won't be regarded as such, as it may no longer be.
			nr[allocs[i].nr].pointerified = false;
		}
	}
	// Sanity check
	for (int i = 0; i < config_.totalNativeRegs; i++) {
		if (nr[i].mipsReg != IRREG_INVALID && !mr[nr[i].mipsReg].isStatic) {
			ERROR_LOG_REPORT(Log::JIT, "Flush fail: nr[%i].mipsReg=%i", i, nr[i].mipsReg);
		}
	}
}

void IRNativeRegCacheBase::Map(const IRInst &inst) {
	Mapping mapping[3];
	MappingFromInst(inst, mapping);

	ApplyMapping(mapping, 3);
	CleanupMapping(mapping, 3);
}

void IRNativeRegCacheBase::MapWithExtra(const IRInst &inst, std::vector<Mapping> extra) {
	extra.resize(extra.size() + 3);
	MappingFromInst(inst, &extra[extra.size() - 3]);

	ApplyMapping(extra.data(), (int)extra.size());
	CleanupMapping(extra.data(), (int)extra.size());
}

IRNativeReg IRNativeRegCacheBase::MapWithTemp(const IRInst &inst, MIPSLoc type) {
	Mapping mapping[3];
	MappingFromInst(inst, mapping);

	ApplyMapping(mapping, 3);
	// Grab a temp while things are spill locked.
	IRNativeReg temp = AllocateReg(type, MIPSMap::INIT);
	CleanupMapping(mapping, 3);
	return temp;
}

void IRNativeRegCacheBase::ApplyMapping(const Mapping *mapping, int count) {
	for (int i = 0; i < count; ++i) {
		SetSpillLockIRIndex(mapping[i].reg, irIndex_);
		if (!config_.mapFPUSIMD && mapping[i].type != 'G') {
			for (int j = 1; j < mapping[i].lanes; ++j)
				SetSpillLockIRIndex(mapping[i].reg + j, irIndex_);
		}
	}

	auto isNoinit = [](MIPSMap f) {
		return (f & MIPSMap::NOINIT) == MIPSMap::NOINIT;
	};

	auto mapRegs = [&](int i) {
		MIPSLoc type = MIPSLoc::MEM;
		switch (mapping[i].type) {
		case 'G': type = MIPSLoc::REG; break;
		case 'F': type = MIPSLoc::FREG; break;
		case 'V': type = MIPSLoc::VREG; break;

		case '_':
			// Ignored intentionally.
			return;

		default:
			_assert_msg_(false, "Unexpected type: %c", mapping[i].type);
			return;
		}

		bool mapSIMD = config_.mapFPUSIMD || mapping[i].type == 'G';
		MIPSMap flags = mapping[i].flags;
		for (int j = 0; j < count; ++j) {
			if (mapping[j].type == mapping[i].type && mapping[j].reg == mapping[i].reg && i != j) {
				_assert_msg_(!mapSIMD || mapping[j].lanes == mapping[i].lanes, "Lane aliasing not supported yet");

				if (!isNoinit(mapping[j].flags) && isNoinit(flags)) {
					flags = (flags & MIPSMap::BACKEND_MASK) | MIPSMap::DIRTY;
				}
			}
		}

		if (mapSIMD) {
			MapNativeReg(type, mapping[i].reg, mapping[i].lanes, flags);
			return;
		}

		for (int j = 0; j < mapping[i].lanes; ++j)
			MapNativeReg(type, mapping[i].reg + j, 1, flags);
	};
	auto mapFilteredRegs = [&](auto pred) {
		for (int i = 0; i < count; ++i) {
			if (pred(mapping[i].flags))
				mapRegs(i);
		}
	};

	// Do two passes: with backend special flags, and without.
	mapFilteredRegs([](MIPSMap flags) {
		return (flags & MIPSMap::BACKEND_MASK) != MIPSMap::INIT;
	});
	mapFilteredRegs([](MIPSMap flags) {
		return (flags & MIPSMap::BACKEND_MASK) == MIPSMap::INIT;
	});
}
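
// The two passes above let a backend stage its special cases first: any
// mapping carrying bits in MIPSMap::BACKEND_MASK is applied before the plain
// ones. Every requested reg stays spill-locked for the whole of
// ApplyMapping(), so the second pass can't evict what the first pass mapped.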
845
846
void IRNativeRegCacheBase::CleanupMapping(const Mapping *mapping, int count) {
847
for (int i = 0; i < count; ++i) {
848
SetSpillLockIRIndex(mapping[i].reg, -1);
849
if (!config_.mapFPUSIMD && mapping[i].type != 'G') {
850
for (int j = 1; j < mapping[i].lanes; ++j)
851
SetSpillLockIRIndex(mapping[i].reg + j, -1);
852
}
853
}
854
855
// Sanity check. If these don't pass, we may have Vec overlap issues or etc.
856
for (int i = 0; i < count; ++i) {
857
if (mapping[i].reg != IRREG_INVALID) {
858
auto &mreg = mr[mapping[i].reg];
859
_dbg_assert_(mreg.nReg != -1);
860
if (mapping[i].type == 'G') {
861
_dbg_assert_(mreg.loc == MIPSLoc::REG || mreg.loc == MIPSLoc::REG_AS_PTR || mreg.loc == MIPSLoc::REG_IMM);
862
} else if (mapping[i].type == 'F') {
863
_dbg_assert_(mreg.loc == MIPSLoc::FREG);
864
} else if (mapping[i].type == 'V') {
865
_dbg_assert_(mreg.loc == MIPSLoc::VREG);
866
}
867
if (mapping[i].lanes != 1 && (config_.mapFPUSIMD || mapping[i].type == 'G')) {
868
_dbg_assert_(mreg.lane == 0);
869
_dbg_assert_(mr[mapping[i].reg + mapping[i].lanes - 1].lane == mapping[i].lanes - 1);
870
_dbg_assert_(mreg.nReg == mr[mapping[i].reg + mapping[i].lanes - 1].nReg);
871
} else {
872
_dbg_assert_(mreg.lane == -1);
873
}
874
}
875
}
876
}

void IRNativeRegCacheBase::MappingFromInst(const IRInst &inst, Mapping mapping[3]) {
	mapping[0].reg = inst.dest;
	mapping[1].reg = inst.src1;
	mapping[2].reg = inst.src2;

	const IRMeta *m = GetIRMeta(inst.op);
	for (int i = 0; i < 3; ++i) {
		switch (m->types[i]) {
		case 'G':
			mapping[i].type = 'G';
			_assert_msg_(IsValidGPR(mapping[i].reg), "G was not valid GPR?");
			break;

		case 'F':
			mapping[i].reg += 32;
			mapping[i].type = 'F';
			_assert_msg_(IsValidFPR(mapping[i].reg - 32), "F was not valid FPR?");
			break;

		case 'V':
		case '2':
			mapping[i].reg += 32;
			mapping[i].type = config_.mapUseVRegs ? 'V' : 'F';
			mapping[i].lanes = m->types[i] == 'V' ? 4 : (m->types[i] == '2' ? 2 : 1);
			_assert_msg_(IsValidFPR(mapping[i].reg - 32), "%c was not valid FPR?", m->types[i]);
			break;

		case 'T':
			mapping[i].type = 'G';
			_assert_msg_(mapping[i].reg < VFPU_CTRL_MAX, "T was not valid VFPU CTRL?");
			mapping[i].reg += IRREG_VFPU_CTRL_BASE;
			break;

		case '\0':
		case '_':
		case 'C':
		case 'r':
		case 'I':
		case 'v':
		case 's':
		case 'm':
			mapping[i].type = '_';
			mapping[i].reg = IRREG_INVALID;
			mapping[i].lanes = 0;
			break;

		default:
			_assert_msg_(mapping[i].reg == IRREG_INVALID, "Unexpected register type %c", m->types[i]);
			break;
		}
	}

	if (mapping[0].type != '_') {
		if ((m->flags & IRFLAG_SRC3DST) != 0)
			mapping[0].flags = MIPSMap::DIRTY;
		else if ((m->flags & IRFLAG_SRC3) != 0)
			mapping[0].flags = MIPSMap::INIT;
		else
			mapping[0].flags = MIPSMap::NOINIT;
	}
}
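
// Concrete example of the translation above: an IR op declared with types
// "GGG" and no SRC3 flags (a plain three-operand GPR op) roughly yields
//   mapping[0] = { inst.dest, 'G', NOINIT }   // pure overwrite, skip load
//   mapping[1] = { inst.src1, 'G' }, mapping[2] = { inst.src2, 'G' }
// (sources keep their default read flags), while an "FFF" op first biases
// each reg by +32 into the FPR portion of the mappable space.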

IRNativeReg IRNativeRegCacheBase::MapNativeReg(MIPSLoc type, IRReg first, int lanes, MIPSMap flags) {
	_assert_msg_(first != IRREG_INVALID, "Cannot map invalid register");
	_assert_msg_(lanes >= 1 && lanes <= 4, "Cannot map %d lanes", lanes);
	if (first == IRREG_INVALID || lanes < 1)
		return -1;

	// Let's see if it's already mapped or we need a new reg.
	IRNativeReg nreg = mr[first].nReg;
	if (mr[first].isStatic) {
		_assert_msg_(nreg != -1, "MapIRReg on static without an nReg?");
	} else {
		switch (mr[first].loc) {
		case MIPSLoc::REG_IMM:
		case MIPSLoc::REG_AS_PTR:
		case MIPSLoc::REG:
			if (type != MIPSLoc::REG) {
				nreg = AllocateReg(type, flags);
			} else if (!IsNativeRegCompatible(nreg, type, flags, lanes)) {
				// If it's not compatible, we'll need to reallocate.
				if (TransferNativeReg(nreg, -1, type, first, lanes, flags)) {
					nreg = mr[first].nReg;
				} else {
					FlushNativeReg(nreg);
					nreg = AllocateReg(type, flags);
				}
			}
			break;

		case MIPSLoc::FREG:
		case MIPSLoc::VREG:
			if (type != mr[first].loc) {
				nreg = AllocateReg(type, flags);
			} else if (!IsNativeRegCompatible(nreg, type, flags, lanes)) {
				if (TransferNativeReg(nreg, -1, type, first, lanes, flags)) {
					nreg = mr[first].nReg;
				} else {
					FlushNativeReg(nreg);
					nreg = AllocateReg(type, flags);
				}
			}
			break;

		case MIPSLoc::IMM:
		case MIPSLoc::MEM:
			nreg = AllocateReg(type, flags);
			break;
		}
	}

	if (nreg != -1) {
		// This will handle already mapped and new mappings.
		MapNativeReg(type, nreg, first, lanes, flags);
	}

	return nreg;
}

void IRNativeRegCacheBase::MapNativeReg(MIPSLoc type, IRNativeReg nreg, IRReg first, int lanes, MIPSMap flags) {
	// First, try to clean up any lane mismatches.
	// It must either be in the same nreg and lane count, or not in an nreg.
	for (int i = 0; i < lanes; ++i) {
		auto &mreg = mr[first + i];
		if (mreg.nReg != -1) {
			// How many lanes is it currently in?
			int oldlanes = 0;
			for (IRReg m = nr[mreg.nReg].mipsReg; m < IRREG_INVALID && mr[m].nReg == mreg.nReg; ++m)
				oldlanes++;

			// We may need to flush if it goes outside or we're initing.
			int oldlane = mreg.lane == -1 ? 0 : mreg.lane;
			bool mismatch = oldlanes != lanes || oldlane != i;
			if (mismatch) {
				_assert_msg_(!mreg.isStatic, "Cannot MapNativeReg a static reg mismatch");
				if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
					// If we need init, we have to flush mismatches.
					if (!TransferNativeReg(mreg.nReg, nreg, type, first, lanes, flags)) {
						// TODO: We may also be motivated to have multiple read-only "views" of an IRReg.
						// For example Vec4Scale v0..v3, v0..v3, v3
						FlushNativeReg(mreg.nReg);
					}
					// The mismatch has been "resolved" now.
					mismatch = false;
				} else if (oldlanes != 1) {
					// Even if we don't care about the current contents, we can't discard outside.
					bool extendsBefore = oldlane > i;
					bool extendsAfter = i + oldlanes - oldlane > lanes;
					if (extendsBefore || extendsAfter) {
						// Usually, this is 4->1. Check for clobber.
						bool clobbered = false;
						if (lanes == 1) {
							IRSituation info;
							info.lookaheadCount = 16;
							info.currentIndex = irIndex_;
							info.instructions = irBlockCache_->GetBlockInstructionPtr(irBlockNum_);
							info.numInstructions = irBlock_->GetNumIRInstructions();

							IRReg basefpr = first - oldlane - 32;
							clobbered = true;
							for (int l = 0; l < oldlanes; ++l) {
								// Ignore the one we're modifying.
								if (l == oldlane)
									continue;

								if (IRNextFPRUsage(basefpr + l, info) != IRUsage::CLOBBERED) {
									clobbered = false;
									break;
								}
							}
						}

						if (clobbered)
							DiscardNativeReg(mreg.nReg);
						else
							FlushNativeReg(mreg.nReg);

						// That took care of the mismatch, either by clobber or flush.
						mismatch = false;
					}
				}
			}

			// If it's still in a different reg, either discard or possibly transfer.
			if (mreg.nReg != -1 && (mreg.nReg != nreg || mismatch)) {
				_assert_msg_(!mreg.isStatic, "Cannot MapNativeReg a static reg to a new reg");
				if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
					// We better not be trying to map to a different nreg if it's in one now.
					// This might happen on some sort of transfer...
					if (!TransferNativeReg(mreg.nReg, nreg, type, first, lanes, flags))
						FlushNativeReg(mreg.nReg);
				} else {
					DiscardNativeReg(mreg.nReg);
				}
			}
		}

		// If somehow this is an imm and mapping to a multilane native reg (HI/LO?), we store it.
		// TODO: Could check the others are imm and be smarter, but seems an unlikely case.
		if (mreg.loc == MIPSLoc::IMM && lanes > 1) {
			if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT)
				StoreRegValue(first + i, mreg.imm);
			mreg.loc = MIPSLoc::MEM;
			if (!mreg.isStatic)
				mreg.nReg = -1;
			mreg.imm = 0;
		}
	}

	// Double check: everything should be in the same loc for multilane now.
	for (int i = 1; i < lanes; ++i) {
		_assert_(mr[first + i].loc == mr[first].loc);
	}

	bool markDirty = (flags & MIPSMap::DIRTY) == MIPSMap::DIRTY;
	if (mr[first].nReg != nreg) {
		nr[nreg].isDirty = markDirty;
		nr[nreg].pointerified = false;
		nr[nreg].normalized32 = false;
	}

	// Alright, now to actually map.
	if ((flags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
		if (first == MIPS_REG_ZERO) {
			_assert_msg_(lanes == 1, "Cannot use MIPS_REG_ZERO in multilane");
			SetNativeRegValue(nreg, 0);
			mr[first].loc = MIPSLoc::REG_IMM;
			mr[first].imm = 0;
		} else {
			// Note: we checked above, everything is in the same loc if multilane.
			switch (mr[first].loc) {
			case MIPSLoc::IMM:
				_assert_msg_(lanes == 1, "Not handling multilane imm here");
				SetNativeRegValue(nreg, mr[first].imm);
				// IMM is always dirty unless static.
				if (!mr[first].isStatic)
					nr[nreg].isDirty = true;

				// If we are mapping dirty, it means we're gonna overwrite.
				// So the imm value is no longer valid.
				if ((flags & MIPSMap::DIRTY) == MIPSMap::DIRTY)
					mr[first].loc = MIPSLoc::REG;
				else
					mr[first].loc = MIPSLoc::REG_IMM;
				break;

			case MIPSLoc::REG_IMM:
				// If it's not dirty, we can keep it.
				_assert_msg_(type == MIPSLoc::REG, "Should have flushed this reg already");
				if ((flags & MIPSMap::DIRTY) == MIPSMap::DIRTY || lanes != 1)
					mr[first].loc = MIPSLoc::REG;
				for (int i = 1; i < lanes; ++i)
					mr[first + i].loc = type;
				break;

			case MIPSLoc::REG_AS_PTR:
				_assert_msg_(lanes == 1, "Should have flushed before getting here");
				_assert_msg_(type == MIPSLoc::REG, "Should have flushed this reg already");
#ifndef MASKED_PSP_MEMORY
				AdjustNativeRegAsPtr(nreg, false);
#endif
				for (int i = 0; i < lanes; ++i)
					mr[first + i].loc = type;
#ifdef MASKED_PSP_MEMORY
				LoadNativeReg(nreg, first, lanes);
#endif
				break;

			case MIPSLoc::REG:
			case MIPSLoc::FREG:
			case MIPSLoc::VREG:
				// Might be flipping from FREG -> VREG or something.
				_assert_msg_(type == mr[first].loc, "Should have flushed this reg already");
				for (int i = 0; i < lanes; ++i)
					mr[first + i].loc = type;
				break;

			case MIPSLoc::MEM:
				for (int i = 0; i < lanes; ++i)
					mr[first + i].loc = type;
				LoadNativeReg(nreg, first, lanes);
				break;
			}
		}
	} else {
		for (int i = 0; i < lanes; ++i)
			mr[first + i].loc = type;
	}

	for (int i = 0; i < lanes; ++i) {
		mr[first + i].nReg = nreg;
		mr[first + i].lane = lanes == 1 ? -1 : i;
	}

	nr[nreg].mipsReg = first;

	if (markDirty) {
		nr[nreg].isDirty = true;
		nr[nreg].pointerified = false;
		nr[nreg].normalized32 = false;
		_assert_(first != MIPS_REG_ZERO);
	}
}

IRNativeReg IRNativeRegCacheBase::MapNativeRegAsPointer(IRReg gpr) {
	_dbg_assert_(IsValidGPRNoZero(gpr));

	// Already mapped.
	if (mr[gpr].loc == MIPSLoc::REG_AS_PTR) {
		return mr[gpr].nReg;
	}

	// Cannot use if somehow multilane.
	if (mr[gpr].nReg != -1 && mr[gpr].lane != -1) {
		FlushNativeReg(mr[gpr].nReg);
	}

	IRNativeReg nreg = mr[gpr].nReg;
	if (mr[gpr].loc != MIPSLoc::REG && mr[gpr].loc != MIPSLoc::REG_IMM) {
		nreg = MapNativeReg(MIPSLoc::REG, gpr, 1, MIPSMap::INIT);
	}

	if (mr[gpr].loc == MIPSLoc::REG || mr[gpr].loc == MIPSLoc::REG_IMM) {
		// If there was an imm attached, discard it.
		mr[gpr].loc = MIPSLoc::REG;
		mr[gpr].imm = 0;

#ifdef MASKED_PSP_MEMORY
		if (nr[mr[gpr].nReg].isDirty) {
			StoreNativeReg(mr[gpr].nReg, gpr, 1);
			nr[mr[gpr].nReg].isDirty = false;
		}
#endif

		if (!jo_->enablePointerify) {
			AdjustNativeRegAsPtr(nreg, true);
			mr[gpr].loc = MIPSLoc::REG_AS_PTR;
		} else if (!nr[nreg].pointerified) {
			AdjustNativeRegAsPtr(nreg, true);
			nr[nreg].pointerified = true;
		}
	} else {
		ERROR_LOG(Log::JIT, "MapNativeRegAsPointer: MapNativeReg failed to allocate a register?");
	}
	return nreg;
}

void IRNativeRegCacheBase::AdjustNativeRegAsPtr(IRNativeReg nreg, bool state) {
	// This isn't necessary to implement if REG_AS_PTR is unsupported entirely.
	_assert_msg_(false, "AdjustNativeRegAsPtr unimplemented");
}

int IRNativeRegCacheBase::GetMipsRegOffset(IRReg r) {
	_dbg_assert_(IsValidGPR(r) || (r >= 32 && IsValidFPR(r - 32)));
	return r * 4;
}
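
// Example: every mappable reg here is 4 bytes wide and laid out contiguously
// (see MIPSState), so GPR r1 sits at byte offset 4 and FPR f0, which is
// IRReg 32, at offset 128. Hence the simple r * 4.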

bool IRNativeRegCacheBase::IsValidGPR(IRReg r) const {
	// See MIPSState for these offsets.

	// Don't allow FPU regs, VFPU regs, or VFPU temps here.
	if (r >= 32 && IsValidFPR(r - 32))
		return false;
	// Don't allow nextPC, etc. since it's probably a mistake.
	if (r > IRREG_FPCOND && r != IRREG_LLBIT)
		return false;
	// Don't allow PC either.
	if (r == 241)
		return false;

	return true;
}

bool IRNativeRegCacheBase::IsValidGPRNoZero(IRReg r) const {
	return IsValidGPR(r) && r != MIPS_REG_ZERO;
}

bool IRNativeRegCacheBase::IsValidFPR(IRReg r) const {
	// FPR parameters are off by 32 within the MIPSState object.
	if (r >= TOTAL_MAPPABLE_IRREGS - 32)
		return false;

	// See MIPSState for these offsets.
	int index = r + 32;

	// Allow FPU or VFPU regs here.
	if (index >= 32 && index < 32 + 32 + 128)
		return true;
	// Also allow VFPU temps.
	if (index >= 224 && index < 224 + 16)
		return true;

	// Nothing else is allowed for the FPU side.
	return false;
}
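
// Index map used by the checks above (offsets within the mappable IR reg
// space, see MIPSState): [0, 32) GPRs, [32, 64) FPU regs, [64, 192) VFPU
// regs, [224, 240) VFPU temps. The +32 bias converts an FPR parameter into
// this space, which is why callers throughout this file pass `fpr + 32`.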