// Copyright (c) 2021- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <algorithm>
#include <atomic>
#include <cstring>
#include <mutex>
#include <condition_variable>
#include <thread>

#include "Common/Log.h"
#include "Common/Serialize/Serializer.h"
#include "Common/Serialize/SerializeFuncs.h"
#include "Common/Thread/ThreadUtil.h"
#include "Core/Config.h"
#include "Core/CoreTiming.h"
#include "Core/Debugger/Breakpoints.h"
#include "Core/Debugger/MemBlockInfo.h"
#include "Core/MIPS/MIPS.h"
#include "Common/StringUtils.h"
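
// MemSlabMap tracks tagged ranges ("slabs") of emulated memory as a sorted,
// doubly-linked list of non-overlapping [start, end) blocks. A fixed-size
// slice index (heads_) maps each 16 KB slice of the 1 GB address space to the
// slab covering that slice's start, so lookups can jump near the target
// instead of walking the list from the beginning.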
class MemSlabMap {
public:
	MemSlabMap();
	~MemSlabMap();

	bool Mark(uint32_t addr, uint32_t size, uint64_t ticks, uint32_t pc, bool allocated, const char *tag);
	bool Find(MemBlockFlags flags, uint32_t addr, uint32_t size, std::vector<MemBlockInfo> &results);
	// Note that the returned pointer gets invalidated as soon as Mark is called.
	const char *FastFindWriteTag(MemBlockFlags flags, uint32_t addr, uint32_t size);
	void Reset();
	void DoState(PointerWrap &p);

private:
	struct Slab {
		uint32_t start = 0;
		uint32_t end = 0;
		uint64_t ticks = 0;
		uint32_t pc = 0;
		bool allocated = false;
		// Intentionally not included in the save state.
		bool bulkStorage = false;
		char tag[128]{};
		Slab *prev = nullptr;
		Slab *next = nullptr;

		void DoState(PointerWrap &p);
	};

	static constexpr uint32_t MAX_SIZE = 0x40000000;
	static constexpr uint32_t SLICES = 65536;
	static constexpr uint32_t SLICE_SIZE = MAX_SIZE / SLICES;

	Slab *FindSlab(uint32_t addr);
	void Clear();
	// Returns the new slab that begins size bytes after the split point.
	Slab *Split(Slab *slab, uint32_t size);
	void MergeAdjacent(Slab *slab);
	static inline bool Same(const Slab *a, const Slab *b);
	void Merge(Slab *a, Slab *b);
	void FillHeads(Slab *slab);

	Slab *first_ = nullptr;
	Slab *lastFind_ = nullptr;
	std::vector<Slab *> heads_;
	Slab *bulkStorage_ = nullptr;
};
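
// Notifications aren't applied to the slab maps immediately; they're queued
// here under pendingWriteMutex and drained in batches by FlushPendingMemInfo(),
// either from the flush thread or from a query that needs fresh data.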
struct PendingNotifyMem {
	MemBlockFlags flags;
	uint32_t start;
	uint32_t size;
	uint32_t copySrc;
	uint64_t ticks;
	uint32_t pc;
	char tag[128];
};

// 160 KB total (1024 entries).
static constexpr size_t MAX_PENDING_NOTIFIES = 1024;
static constexpr size_t MAX_PENDING_NOTIFIES_THREAD = 1000;
static MemSlabMap allocMap;
static MemSlabMap suballocMap;
static MemSlabMap writeMap;
static MemSlabMap textureMap;
static std::vector<PendingNotifyMem> pendingNotifies;
static std::atomic<uint32_t> pendingNotifyMinAddr1;
static std::atomic<uint32_t> pendingNotifyMaxAddr1;
static std::atomic<uint32_t> pendingNotifyMinAddr2;
static std::atomic<uint32_t> pendingNotifyMaxAddr2;
// To prevent deadlocks, acquire Read before Write if you're going to acquire both.
static std::mutex pendingWriteMutex;
static std::mutex pendingReadMutex;
static int detailedOverride;

static std::thread flushThread;
static std::atomic<bool> flushThreadRunning;
static std::atomic<bool> flushThreadPending;
static std::mutex flushLock;
static std::condition_variable flushCond;

MemSlabMap::MemSlabMap() {
	Reset();
}

MemSlabMap::~MemSlabMap() {
	Clear();
}
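
// Applies the new state to [addr, addr + size): slabs straddling either
// boundary are split first so the range is covered exactly, the covered slabs
// are updated, and identical neighbors are then merged back together.
// Returns true if at least one slab intersected the range.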
bool MemSlabMap::Mark(uint32_t addr, uint32_t size, uint64_t ticks, uint32_t pc, bool allocated, const char *tag) {
	uint32_t end = addr + size;
	Slab *slab = FindSlab(addr);
	Slab *firstMatch = nullptr;
	while (slab != nullptr && slab->start < end) {
		if (slab->start < addr)
			slab = Split(slab, addr - slab->start);
		// Don't replace slab here; Split() returns the part after the split point.
		if (slab->end > end) {
			Split(slab, end - slab->start);
		}

		slab->allocated = allocated;
		if (pc != 0) {
			slab->ticks = ticks;
			slab->pc = pc;
		}
		if (tag)
			truncate_cpy(slab->tag, tag);

		// Move on to the next one.
		if (firstMatch == nullptr)
			firstMatch = slab;
		slab = slab->next;
	}

	if (firstMatch != nullptr) {
		// This will merge all those blocks into one.
		MergeAdjacent(firstMatch);
		return true;
	}
	return false;
}

bool MemSlabMap::Find(MemBlockFlags flags, uint32_t addr, uint32_t size, std::vector<MemBlockInfo> &results) {
	uint32_t end = addr + size;
	Slab *slab = FindSlab(addr);
	bool found = false;
	while (slab != nullptr && slab->start < end) {
		if (slab->pc != 0 || slab->tag[0] != '\0') {
			results.push_back({ flags, slab->start, slab->end - slab->start, slab->ticks, slab->pc, slab->tag, slab->allocated });
			found = true;
		}
		slab = slab->next;
	}
	return found;
}

const char *MemSlabMap::FastFindWriteTag(MemBlockFlags flags, uint32_t addr, uint32_t size) {
	uint32_t end = addr + size;
	Slab *slab = FindSlab(addr);
	while (slab != nullptr && slab->start < end) {
		if (slab->pc != 0 || slab->tag[0] != '\0') {
			return slab->tag;
		}
		slab = slab->next;
	}
	return nullptr;
}

void MemSlabMap::Reset() {
	Clear();

	first_ = new Slab();
	first_->end = MAX_SIZE;
	lastFind_ = first_;

	heads_.resize(SLICES, first_);
}

void MemSlabMap::DoState(PointerWrap &p) {
	auto s = p.Section("MemSlabMap", 1);
	if (!s)
		return;

	int count = 0;
	if (p.mode == p.MODE_READ) {
		// Since heads_ is a static size, let's avoid clearing it.
		// This helps in case a debugger call happens concurrently.
		Slab *old = first_;
		Slab *oldBulk = bulkStorage_;
		Do(p, count);

		first_ = new Slab();
		first_->DoState(p);
		lastFind_ = first_;
		--count;

		FillHeads(first_);

		bulkStorage_ = new Slab[count];

		Slab *slab = first_;
		for (int i = 0; i < count; ++i) {
			slab->next = &bulkStorage_[i];
			slab->next->bulkStorage = true;
			slab->next->DoState(p);

			slab->next->prev = slab;
			slab = slab->next;

			FillHeads(slab);
		}

		// Now that it's entirely disconnected, delete the old slabs.
		while (old != nullptr) {
			Slab *next = old->next;
			if (!old->bulkStorage)
				delete old;
			old = next;
		}
		delete [] oldBulk;
	} else {
		for (Slab *slab = first_; slab != nullptr; slab = slab->next)
			++count;
		Do(p, count);

		first_->DoState(p);
		--count;

		Slab *slab = first_;
		for (int i = 0; i < count; ++i) {
			if (slab->next) {
				slab->next->DoState(p);
				slab = slab->next;
			}
		}
	}
}

void MemSlabMap::Slab::DoState(PointerWrap &p) {
	auto s = p.Section("MemSlabMapSlab", 1, 3);
	if (!s)
		return;

	Do(p, start);
	Do(p, end);
	Do(p, ticks);
	Do(p, pc);
	Do(p, allocated);
	if (s >= 3) {
		Do(p, tag);
	} else if (s >= 2) {
		char shortTag[32];
		Do(p, shortTag);
		memcpy(tag, shortTag, sizeof(shortTag));
	} else {
		std::string stringTag;
		Do(p, stringTag);
		truncate_cpy(tag, stringTag);
	}
}

void MemSlabMap::Clear() {
	Slab *s = first_;
	while (s != nullptr) {
		Slab *next = s->next;
		if (!s->bulkStorage)
			delete s;
		s = next;
	}
	delete [] bulkStorage_;
	bulkStorage_ = nullptr;
	first_ = nullptr;
	lastFind_ = nullptr;
	heads_.clear();
}
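
// Finds the slab containing addr: jumps ahead via the slice index, prefers the
// last hit when it's closer (lookups tend to move forward), then walks the
// list the rest of the way.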
MemSlabMap::Slab *MemSlabMap::FindSlab(uint32_t addr) {
	// Jump ahead using our index.
	Slab *slab = heads_[addr / SLICE_SIZE];
	// We often move forward, so check the last find.
	if (lastFind_->start > slab->start && lastFind_->start <= addr)
		slab = lastFind_;

	while (slab != nullptr && slab->start <= addr) {
		if (slab->end > addr) {
			lastFind_ = slab;
			return slab;
		}
		slab = slab->next;
	}
	return nullptr;
}

MemSlabMap::Slab *MemSlabMap::Split(Slab *slab, uint32_t size) {
	Slab *next = new Slab();
	next->start = slab->start + size;
	next->end = slab->end;
	next->ticks = slab->ticks;
	next->pc = slab->pc;
	next->allocated = slab->allocated;
	truncate_cpy(next->tag, slab->tag);
	next->prev = slab;
	next->next = slab->next;

	slab->next = next;
	if (next->next)
		next->next->prev = next;

	// If the split is big, we might have to update our index.
	FillHeads(next);

	slab->end = slab->start + size;
	return next;
}

bool MemSlabMap::Same(const Slab *a, const Slab *b) {
	if (a->allocated != b->allocated)
		return false;
	if (a->pc != b->pc)
		return false;
	if (strcmp(a->tag, b->tag))
		return false;
	return true;
}

void MemSlabMap::MergeAdjacent(Slab *slab) {
	while (slab->next != nullptr && Same(slab, slab->next)) {
		Merge(slab, slab->next);
	}
	while (slab->prev != nullptr && Same(slab, slab->prev)) {
		Merge(slab, slab->prev);
	}
}

void MemSlabMap::Merge(Slab *a, Slab *b) {
	if (a->next == b) {
		_assert_(a->end == b->start);
		a->end = b->end;
		a->next = b->next;

		if (a->next)
			a->next->prev = a;
	} else if (a->prev == b) {
		_assert_(b->end == a->start);
		a->start = b->start;
		a->prev = b->prev;

		if (a->prev)
			a->prev->next = a;
		else if (first_ == b)
			first_ = a;
	} else {
		_assert_(false);
	}
	// Take over index entries b had.
	FillHeads(a);
	if (b->ticks > a->ticks) {
		a->ticks = b->ticks;
		// Matters if Same() ever ignores the PC; they're equal otherwise.
		a->pc = b->pc;
	}
	if (lastFind_ == b)
		lastFind_ = a;
	if (!b->bulkStorage)
		delete b;
}

void MemSlabMap::FillHeads(Slab *slab) {
	uint32_t slice = slab->start / SLICE_SIZE;
	uint32_t endSlice = (slab->end - 1) / SLICE_SIZE;

	// For the first slice, only replace the head if this slab covers the slice's start.
	if (slab->start == slice * SLICE_SIZE) {
		heads_[slice] = slab;
	}

	// Now replace all the rest - we definitely cover the start of them.
	Slab **next = &heads_[slice + 1];
	// We want to set slice + 1 through endSlice, inclusive.
	size_t c = endSlice - slice;
	for (size_t i = 0; i < c; ++i) {
		next[i] = slab;
	}
}

size_t FormatMemWriteTagAtNoFlush(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size);
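
// Drains the pending queue into the slab maps. Holds the read lock for the
// whole flush so concurrent flushes (e.g. from a query) serialize, and the
// write lock only long enough to steal the queued vector.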
void FlushPendingMemInfo() {
	// This lock prevents another thread from reading while we're busy flushing.
	std::lock_guard<std::mutex> guard(pendingReadMutex);
	std::vector<PendingNotifyMem> thisBatch;
	{
		std::lock_guard<std::mutex> guard(pendingWriteMutex);
		thisBatch = std::move(pendingNotifies);
		pendingNotifies.clear();
		pendingNotifies.reserve(MAX_PENDING_NOTIFIES);

		pendingNotifyMinAddr1 = 0xFFFFFFFF;
		pendingNotifyMaxAddr1 = 0;
		pendingNotifyMinAddr2 = 0xFFFFFFFF;
		pendingNotifyMaxAddr2 = 0;
	}

	for (const auto &info : thisBatch) {
		if (info.copySrc != 0) {
			char tagData[128];
			size_t tagSize = FormatMemWriteTagAtNoFlush(tagData, sizeof(tagData), info.tag, info.copySrc, info.size);
			writeMap.Mark(info.start, info.size, info.ticks, info.pc, true, tagData);
			continue;
		}

		if (info.flags & MemBlockFlags::ALLOC) {
			allocMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		} else if (info.flags & MemBlockFlags::FREE) {
			// Maintain the previous allocation tag for debugging.
			allocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);
			suballocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);
		}
		if (info.flags & MemBlockFlags::SUB_ALLOC) {
			suballocMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		} else if (info.flags & MemBlockFlags::SUB_FREE) {
			// Maintain the previous allocation tag for debugging.
			suballocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);
		}
		if (info.flags & MemBlockFlags::TEXTURE) {
			textureMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		}
		if (info.flags & MemBlockFlags::WRITE) {
			writeMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		}
	}
}
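
// Collapses an address to the canonical mirror the maps are keyed by: the
// VRAM range folds down to 0x04000000-0x041FFFFF, and other addresses simply
// lose the uncached and kernel bits.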
static inline uint32_t NormalizeAddress(uint32_t addr) {
	if ((addr & 0x3F000000) == 0x04000000)
		return addr & 0x041FFFFF;
	return addr & 0x3FFFFFFF;
}

static inline bool MergeRecentMemInfo(const PendingNotifyMem &info, size_t copyLength) {
	if (pendingNotifies.size() < 4)
		return false;

	for (size_t i = 1; i <= 4; ++i) {
		auto &prev = pendingNotifies[pendingNotifies.size() - i];
		if (prev.copySrc != 0)
			return false;

		if (prev.flags != info.flags)
			continue;

		if (prev.start >= info.start + info.size || prev.start + prev.size <= info.start)
			continue;

		// This means there's overlap, but not a match, so we can't combine any.
		if (prev.start != info.start || prev.size > info.size)
			return false;

		memcpy(prev.tag, info.tag, copyLength + 1);
		prev.size = info.size;
		prev.ticks = info.ticks;
		prev.pc = info.pc;
		return true;
	}

	return false;
}
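
// Queues detailed block info (when enabled) for the flush thread and fires any
// matching memchecks immediately; NotifyMemInfo() forwards here with the
// current MIPS PC.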
void NotifyMemInfoPC(MemBlockFlags flags, uint32_t start, uint32_t size, uint32_t pc, const char *tagStr, size_t strLength) {
	if (size == 0) {
		return;
	}
	// Clear the uncached and kernel bits.
	start = NormalizeAddress(start);

	bool needFlush = false;
	// When the setting is off, we skip smaller info to keep things fast.
	if (MemBlockInfoDetailed(size) && flags != MemBlockFlags::READ) {
		PendingNotifyMem info{ flags, start, size };
		info.ticks = CoreTiming::GetTicks();
		info.pc = pc;

		size_t copyLength = strLength;
		if (copyLength >= sizeof(info.tag)) {
			copyLength = sizeof(info.tag) - 1;
		}
		memcpy(info.tag, tagStr, copyLength);
		info.tag[copyLength] = 0;

		std::lock_guard<std::mutex> guard(pendingWriteMutex);
		// Sometimes we get duplicates; check the most recent entries quickly.
		if (!MergeRecentMemInfo(info, copyLength)) {
			if (start < 0x08000000) {
				pendingNotifyMinAddr1 = std::min(pendingNotifyMinAddr1.load(), start);
				pendingNotifyMaxAddr1 = std::max(pendingNotifyMaxAddr1.load(), start + size);
			} else {
				pendingNotifyMinAddr2 = std::min(pendingNotifyMinAddr2.load(), start);
				pendingNotifyMaxAddr2 = std::max(pendingNotifyMaxAddr2.load(), start + size);
			}
			pendingNotifies.push_back(info);
		}
		needFlush = pendingNotifies.size() > MAX_PENDING_NOTIFIES_THREAD;
	}

	if (needFlush) {
		{
			std::lock_guard<std::mutex> guard(flushLock);
			flushThreadPending = true;
		}
		flushCond.notify_one();
	}

	if (!(flags & MemBlockFlags::SKIP_MEMCHECK)) {
		if (flags & MemBlockFlags::WRITE) {
			g_breakpoints.ExecMemCheck(start, true, size, pc, tagStr);
		} else if (flags & MemBlockFlags::READ) {
			g_breakpoints.ExecMemCheck(start, false, size, pc, tagStr);
		}
	}
}

void NotifyMemInfo(MemBlockFlags flags, uint32_t start, uint32_t size, const char *str, size_t strLength) {
	NotifyMemInfoPC(flags, start, size, currentMIPS->pc, str, strLength);
}

void NotifyMemInfoCopy(uint32_t destPtr, uint32_t srcPtr, uint32_t size, const char *prefix) {
	if (size == 0)
		return;

	bool needsFlush = false;
	if (g_breakpoints.HasMemChecks()) {
		// This will cause a flush, but it's needed to trigger memchecks with proper data.
		char tagData[128];
		size_t tagSize = FormatMemWriteTagAt(tagData, sizeof(tagData), prefix, srcPtr, size);
		NotifyMemInfo(MemBlockFlags::READ, srcPtr, size, tagData, tagSize);
		NotifyMemInfo(MemBlockFlags::WRITE, destPtr, size, tagData, tagSize);
	} else if (MemBlockInfoDetailed(size)) {
		srcPtr = NormalizeAddress(srcPtr);
		destPtr = NormalizeAddress(destPtr);

		PendingNotifyMem info{ MemBlockFlags::WRITE, destPtr, size };
		info.copySrc = srcPtr;
		info.ticks = CoreTiming::GetTicks();
		info.pc = currentMIPS->pc;

		// Store the prefix for now. The correct tag will be calculated on flush.
		truncate_cpy(info.tag, prefix);

		std::lock_guard<std::mutex> guard(pendingWriteMutex);
		if (destPtr < 0x08000000) {
			pendingNotifyMinAddr1 = std::min(pendingNotifyMinAddr1.load(), destPtr);
			pendingNotifyMaxAddr1 = std::max(pendingNotifyMaxAddr1.load(), destPtr + size);
		} else {
			pendingNotifyMinAddr2 = std::min(pendingNotifyMinAddr2.load(), destPtr);
			pendingNotifyMaxAddr2 = std::max(pendingNotifyMaxAddr2.load(), destPtr + size);
		}
		pendingNotifies.push_back(info);
		needsFlush = pendingNotifies.size() > MAX_PENDING_NOTIFIES_THREAD;
	}

	if (needsFlush) {
		{
			std::lock_guard<std::mutex> guard(flushLock);
			flushThreadPending = true;
		}
		flushCond.notify_one();
	}
}
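
// The queries below only flush the pending queue when the min/max watermarks
// (tracked separately for addresses below and above 0x08000000) show the
// batch could overlap the requested range.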
std::vector<MemBlockInfo> FindMemInfo(uint32_t start, uint32_t size) {
	start = NormalizeAddress(start);

	if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)
		FlushPendingMemInfo();
	if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)
		FlushPendingMemInfo();

	std::vector<MemBlockInfo> results;
	allocMap.Find(MemBlockFlags::ALLOC, start, size, results);
	suballocMap.Find(MemBlockFlags::SUB_ALLOC, start, size, results);
	writeMap.Find(MemBlockFlags::WRITE, start, size, results);
	textureMap.Find(MemBlockFlags::TEXTURE, start, size, results);
	return results;
}

std::vector<MemBlockInfo> FindMemInfoByFlag(MemBlockFlags flags, uint32_t start, uint32_t size) {
	start = NormalizeAddress(start);

	if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)
		FlushPendingMemInfo();
	if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)
		FlushPendingMemInfo();

	std::vector<MemBlockInfo> results;
	if (flags & MemBlockFlags::ALLOC)
		allocMap.Find(MemBlockFlags::ALLOC, start, size, results);
	if (flags & MemBlockFlags::SUB_ALLOC)
		suballocMap.Find(MemBlockFlags::SUB_ALLOC, start, size, results);
	if (flags & MemBlockFlags::WRITE)
		writeMap.Find(MemBlockFlags::WRITE, start, size, results);
	if (flags & MemBlockFlags::TEXTURE)
		textureMap.Find(MemBlockFlags::TEXTURE, start, size, results);
	return results;
}

static const char *FindWriteTagByFlag(MemBlockFlags flags, uint32_t start, uint32_t size, bool flush = true) {
	start = NormalizeAddress(start);

	if (flush) {
		if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)
			FlushPendingMemInfo();
		if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)
			FlushPendingMemInfo();
	}

	if (flags & MemBlockFlags::ALLOC) {
		const char *tag = allocMap.FastFindWriteTag(MemBlockFlags::ALLOC, start, size);
		if (tag)
			return tag;
	}
	if (flags & MemBlockFlags::SUB_ALLOC) {
		const char *tag = suballocMap.FastFindWriteTag(MemBlockFlags::SUB_ALLOC, start, size);
		if (tag)
			return tag;
	}
	if (flags & MemBlockFlags::WRITE) {
		const char *tag = writeMap.FastFindWriteTag(MemBlockFlags::WRITE, start, size);
		if (tag)
			return tag;
	}
	if (flags & MemBlockFlags::TEXTURE) {
		const char *tag = textureMap.FastFindWriteTag(MemBlockFlags::TEXTURE, start, size);
		if (tag)
			return tag;
	}
	return nullptr;
}

size_t FormatMemWriteTagAt(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size) {
	const char *tag = FindWriteTagByFlag(MemBlockFlags::WRITE, start, size);
	if (tag && strcmp(tag, "MemInit") != 0) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	// Fall back to alloc and texture, especially for VRAM. We prefer write above.
	tag = FindWriteTagByFlag(MemBlockFlags::ALLOC | MemBlockFlags::TEXTURE, start, size);
	if (tag) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	return snprintf(buf, sz, "%s%08x_size_%08x", prefix, start, size);
}

size_t FormatMemWriteTagAtNoFlush(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size) {
	const char *tag = FindWriteTagByFlag(MemBlockFlags::WRITE, start, size, false);
	if (tag && strcmp(tag, "MemInit") != 0) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	// Fall back to alloc and texture, especially for VRAM. We prefer write above.
	tag = FindWriteTagByFlag(MemBlockFlags::ALLOC | MemBlockFlags::TEXTURE, start, size, false);
	if (tag) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	return snprintf(buf, sz, "%s%08x_size_%08x", prefix, start, size);
}
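
// Background worker: drains the queue, then sleeps on flushCond until
// flushThreadPending is set again (by queue pressure in the notify functions,
// or by shutdown).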
static void FlushMemInfoThread() {
	SetCurrentThreadName("FlushMemInfo");

	while (flushThreadRunning.load()) {
		flushThreadPending = false;
		FlushPendingMemInfo();

		std::unique_lock<std::mutex> guard(flushLock);
		flushCond.wait(guard, [] {
			return flushThreadPending.load();
		});
	}
}

void MemBlockInfoInit() {
	std::lock_guard<std::mutex> guard(pendingReadMutex);
	std::lock_guard<std::mutex> guardW(pendingWriteMutex);
	pendingNotifies.reserve(MAX_PENDING_NOTIFIES);
	pendingNotifyMinAddr1 = 0xFFFFFFFF;
	pendingNotifyMaxAddr1 = 0;
	pendingNotifyMinAddr2 = 0xFFFFFFFF;
	pendingNotifyMaxAddr2 = 0;

	flushThreadRunning = true;
	flushThreadPending = false;
	flushThread = std::thread(&FlushMemInfoThread);
}

void MemBlockInfoShutdown() {
	{
		std::lock_guard<std::mutex> guard(pendingReadMutex);
		std::lock_guard<std::mutex> guardW(pendingWriteMutex);
		allocMap.Reset();
		suballocMap.Reset();
		writeMap.Reset();
		textureMap.Reset();
		pendingNotifies.clear();
	}

	if (flushThreadRunning.load()) {
		std::lock_guard<std::mutex> guard(flushLock);
		flushThreadRunning = false;
		flushThreadPending = true;
	}
	flushCond.notify_one();
	flushThread.join();
}

void MemBlockInfoDoState(PointerWrap &p) {
	auto s = p.Section("MemBlockInfo", 0, 1);
	if (!s)
		return;

	FlushPendingMemInfo();
	allocMap.DoState(p);
	suballocMap.DoState(p);
	writeMap.DoState(p);
	textureMap.DoState(p);
}

// Used by the debugger.
void MemBlockOverrideDetailed() {
	detailedOverride++;
}

void MemBlockReleaseDetailed() {
	detailedOverride--;
}

bool MemBlockInfoDetailed() {
	return g_Config.bDebugMemInfoDetailed || detailedOverride != 0;
}