GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/Debugger/MemBlockInfo.cpp

// Copyright (c) 2021- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstring>
#include <mutex>
#include <thread>

#include "Common/Log.h"
#include "Common/Serialize/Serializer.h"
#include "Common/Serialize/SerializeFuncs.h"
#include "Common/Thread/ThreadUtil.h"
#include "Core/Config.h"
#include "Core/CoreTiming.h"
#include "Core/Debugger/Breakpoints.h"
#include "Core/Debugger/MemBlockInfo.h"
#include "Core/MIPS/MIPS.h"
#include "Common/StringUtils.h"

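// MemSlabMap tracks one category of memory events (allocs, writes, etc.) as a
// doubly-linked list of non-overlapping, adjacent "slabs" covering the whole
// 0x40000000 address space. A fixed-size slice index (heads_) points at the
// slab covering the start of each SLICE_SIZE-aligned slice, so lookups can
// jump close to any address in O(1) before walking forward.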
class MemSlabMap {
public:
	MemSlabMap();
	~MemSlabMap();

	bool Mark(uint32_t addr, uint32_t size, uint64_t ticks, uint32_t pc, bool allocated, const char *tag);
	bool Find(MemBlockFlags flags, uint32_t addr, uint32_t size, std::vector<MemBlockInfo> &results);
	// Note that the returned pointer gets invalidated as soon as Mark is called.
	const char *FastFindWriteTag(MemBlockFlags flags, uint32_t addr, uint32_t size);
	void Reset();
	void DoState(PointerWrap &p);

private:
	struct Slab {
		uint32_t start = 0;
		uint32_t end = 0;
		uint64_t ticks = 0;
		uint32_t pc = 0;
		bool allocated = false;
		// Intentionally not save stated.
		bool bulkStorage = false;
		char tag[128]{};
		Slab *prev = nullptr;
		Slab *next = nullptr;

		void DoState(PointerWrap &p);
	};

	static constexpr uint32_t MAX_SIZE = 0x40000000;
	static constexpr uint32_t SLICES = 65536;
	static constexpr uint32_t SLICE_SIZE = MAX_SIZE / SLICES;

	Slab *FindSlab(uint32_t addr);
	void Clear();
	// Returns the new slab after size.
	Slab *Split(Slab *slab, uint32_t size);
	void MergeAdjacent(Slab *slab);
	static inline bool Same(const Slab *a, const Slab *b);
	void Merge(Slab *a, Slab *b);
	void FillHeads(Slab *slab);

	Slab *first_ = nullptr;
	Slab *lastFind_ = nullptr;
	std::vector<Slab *> heads_;
	Slab *bulkStorage_ = nullptr;
};

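// A memory event queued by the NotifyMemInfo family. Events are buffered here
// and applied to the slab maps in batches by FlushPendingMemInfo(), either on
// the background flush thread or synchronously before a debugger query. If
// copySrc is nonzero, the entry is a copy whose final tag is formatted at
// flush time.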
struct PendingNotifyMem {
	MemBlockFlags flags;
	uint32_t start;
	uint32_t size;
	uint32_t copySrc;
	uint64_t ticks;
	uint32_t pc;
	char tag[128];
};

// 160 KB.
static constexpr size_t MAX_PENDING_NOTIFIES = 1024;
static constexpr size_t MAX_PENDING_NOTIFIES_THREAD = 1000;
static MemSlabMap allocMap;
static MemSlabMap suballocMap;
static MemSlabMap writeMap;
static MemSlabMap textureMap;
static std::vector<PendingNotifyMem> pendingNotifies;
static std::atomic<uint32_t> pendingNotifyMinAddr1;
static std::atomic<uint32_t> pendingNotifyMaxAddr1;
static std::atomic<uint32_t> pendingNotifyMinAddr2;
static std::atomic<uint32_t> pendingNotifyMaxAddr2;
// To prevent deadlocks, acquire Read before Write if you're going to acquire both.
static std::mutex pendingWriteMutex;
static std::mutex pendingReadMutex;
static int detailedOverride;

static std::thread flushThread;
static std::atomic<bool> flushThreadRunning;
static std::atomic<bool> flushThreadPending;
static std::mutex flushLock;
static std::condition_variable flushCond;

MemSlabMap::MemSlabMap() {
	Reset();
}

MemSlabMap::~MemSlabMap() {
	Clear();
}

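// Applies a tag/state to [addr, addr + size): splits the slabs straddling
// either boundary so the range is covered exactly, updates each slab inside,
// then merges newly-identical neighbors back into one slab.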
bool MemSlabMap::Mark(uint32_t addr, uint32_t size, uint64_t ticks, uint32_t pc, bool allocated, const char *tag) {
	uint32_t end = addr + size;
	Slab *slab = FindSlab(addr);
	Slab *firstMatch = nullptr;
	while (slab != nullptr && slab->start < end) {
		if (slab->start < addr)
			slab = Split(slab, addr - slab->start);
		// Don't replace slab, the return is the after part.
		if (slab->end > end) {
			Split(slab, end - slab->start);
		}

		slab->allocated = allocated;
		if (pc != 0) {
			slab->ticks = ticks;
			slab->pc = pc;
		}
		if (tag)
			truncate_cpy(slab->tag, tag);

		// Move on to the next one.
		if (firstMatch == nullptr)
			firstMatch = slab;
		slab = slab->next;
	}

	if (firstMatch != nullptr) {
		// This will merge all those blocks to one.
		MergeAdjacent(firstMatch);
		return true;
	}
	return false;
}

bool MemSlabMap::Find(MemBlockFlags flags, uint32_t addr, uint32_t size, std::vector<MemBlockInfo> &results) {
	uint32_t end = addr + size;
	Slab *slab = FindSlab(addr);
	bool found = false;
	while (slab != nullptr && slab->start < end) {
		if (slab->pc != 0 || slab->tag[0] != '\0') {
			results.push_back({ flags, slab->start, slab->end - slab->start, slab->ticks, slab->pc, slab->tag, slab->allocated });
			found = true;
		}
		slab = slab->next;
	}
	return found;
}

const char *MemSlabMap::FastFindWriteTag(MemBlockFlags flags, uint32_t addr, uint32_t size) {
	uint32_t end = addr + size;
	Slab *slab = FindSlab(addr);
	while (slab != nullptr && slab->start < end) {
		if (slab->pc != 0 || slab->tag[0] != '\0') {
			return slab->tag;
		}
		slab = slab->next;
	}
	return nullptr;
}

void MemSlabMap::Reset() {
	Clear();

	first_ = new Slab();
	first_->end = MAX_SIZE;
	lastFind_ = first_;

	heads_.resize(SLICES, first_);
}

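// Save-state serialization. On load, the old list is rebuilt into a single
// bulk-allocated array of slabs (bulkStorage_) to avoid per-slab allocations,
// and heads_ is refilled as each slab is linked in.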
void MemSlabMap::DoState(PointerWrap &p) {
	auto s = p.Section("MemSlabMap", 1);
	if (!s)
		return;

	int count = 0;
	if (p.mode == p.MODE_READ) {
		// Since heads_ is a static size, let's avoid clearing it.
		// This helps in case a debugger call happens concurrently.
		Slab *old = first_;
		Slab *oldBulk = bulkStorage_;
		Do(p, count);

		first_ = new Slab();
		first_->DoState(p);
		lastFind_ = first_;
		--count;

		FillHeads(first_);

		bulkStorage_ = new Slab[count];

		Slab *slab = first_;
		for (int i = 0; i < count; ++i) {
			slab->next = &bulkStorage_[i];
			slab->next->bulkStorage = true;
			slab->next->DoState(p);

			slab->next->prev = slab;
			slab = slab->next;

			FillHeads(slab);
		}

		// Now that it's entirely disconnected, delete the old slabs.
		while (old != nullptr) {
			Slab *next = old->next;
			if (!old->bulkStorage)
				delete old;
			old = next;
		}
		delete [] oldBulk;
	} else {
		for (Slab *slab = first_; slab != nullptr; slab = slab->next)
			++count;
		Do(p, count);

		first_->DoState(p);
		--count;

		Slab *slab = first_;
		for (int i = 0; i < count; ++i) {
			slab->next->DoState(p);
			slab = slab->next;
		}
	}
}

void MemSlabMap::Slab::DoState(PointerWrap &p) {
	auto s = p.Section("MemSlabMapSlab", 1, 3);
	if (!s)
		return;

	Do(p, start);
	Do(p, end);
	Do(p, ticks);
	Do(p, pc);
	Do(p, allocated);
	if (s >= 3) {
		Do(p, tag);
	} else if (s >= 2) {
		char shortTag[32];
		Do(p, shortTag);
		memcpy(tag, shortTag, sizeof(shortTag));
	} else {
		std::string stringTag;
		Do(p, stringTag);
		truncate_cpy(tag, stringTag.c_str());
	}
}

void MemSlabMap::Clear() {
	Slab *s = first_;
	while (s != nullptr) {
		Slab *next = s->next;
		if (!s->bulkStorage)
			delete s;
		s = next;
	}
	delete [] bulkStorage_;
	bulkStorage_ = nullptr;
	first_ = nullptr;
	lastFind_ = nullptr;
	heads_.clear();
}

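// Finds the slab containing addr. Jumps in via the slice index (or the last
// find, when that is closer) and walks forward. Callers normalize addresses
// to below MAX_SIZE first, so a covering slab always exists.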
MemSlabMap::Slab *MemSlabMap::FindSlab(uint32_t addr) {
	// Jump ahead using our index.
	Slab *slab = heads_[addr / SLICE_SIZE];
	// We often move forward, so check the last find.
	if (lastFind_->start > slab->start && lastFind_->start <= addr)
		slab = lastFind_;

	while (slab != nullptr && slab->start <= addr) {
		if (slab->end > addr) {
			lastFind_ = slab;
			return slab;
		}
		slab = slab->next;
	}
	return nullptr;
}

MemSlabMap::Slab *MemSlabMap::Split(Slab *slab, uint32_t size) {
	Slab *next = new Slab();
	next->start = slab->start + size;
	next->end = slab->end;
	next->ticks = slab->ticks;
	next->pc = slab->pc;
	next->allocated = slab->allocated;
	truncate_cpy(next->tag, slab->tag);
	next->prev = slab;
	next->next = slab->next;

	slab->next = next;
	if (next->next)
		next->next->prev = next;

	// If the split is big, we might have to update our index.
	FillHeads(next);

	slab->end = slab->start + size;
	return next;
}

bool MemSlabMap::Same(const Slab *a, const Slab *b) {
	if (a->allocated != b->allocated)
		return false;
	if (a->pc != b->pc)
		return false;
	if (strcmp(a->tag, b->tag))
		return false;
	return true;
}

void MemSlabMap::MergeAdjacent(Slab *slab) {
	while (slab->next != nullptr && Same(slab, slab->next)) {
		Merge(slab, slab->next);
	}
	while (slab->prev != nullptr && Same(slab, slab->prev)) {
		Merge(slab, slab->prev);
	}
}

void MemSlabMap::Merge(Slab *a, Slab *b) {
	if (a->next == b) {
		_assert_(a->end == b->start);
		a->end = b->end;
		a->next = b->next;

		if (a->next)
			a->next->prev = a;
	} else if (a->prev == b) {
		_assert_(b->end == a->start);
		a->start = b->start;
		a->prev = b->prev;

		if (a->prev)
			a->prev->next = a;
		else if (first_ == b)
			first_ = a;
	} else {
		_assert_(false);
	}
	// Take over index entries b had.
	FillHeads(a);
	if (b->ticks > a->ticks) {
		a->ticks = b->ticks;
		// In case we ignore PC for same.
		a->pc = b->pc;
	}
	if (lastFind_ == b)
		lastFind_ = a;
	if (!b->bulkStorage)
		delete b;
}

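// Updates the slice index so every slice whose start falls inside this slab
// points at it. Called whenever slab boundaries change (split, merge, load).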
void MemSlabMap::FillHeads(Slab *slab) {
	uint32_t slice = slab->start / SLICE_SIZE;
	uint32_t endSlice = (slab->end - 1) / SLICE_SIZE;

	// For the first slice, only replace the head if this slab starts exactly at the slice boundary.
	if (slab->start == slice * SLICE_SIZE) {
		heads_[slice] = slab;
	}

	// Now replace all the rest - we definitely cover the start of them.
	Slab **next = &heads_[slice + 1];
	// We want to set slice + 1 through endSlice, inclusive.
	size_t c = endSlice - slice;
	for (size_t i = 0; i < c; ++i) {
		next[i] = slab;
	}
}

size_t FormatMemWriteTagAtNoFlush(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size);

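// Drains the pending queue into the slab maps. The read mutex is held for the
// whole flush so readers see a consistent view; the write mutex is only held
// long enough to swap the queue out, keeping NotifyMemInfo callers fast.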
void FlushPendingMemInfo() {
	// This lock prevents another thread from reading while we're busy flushing.
	std::lock_guard<std::mutex> guard(pendingReadMutex);
	std::vector<PendingNotifyMem> thisBatch;
	{
		std::lock_guard<std::mutex> guard(pendingWriteMutex);
		thisBatch = std::move(pendingNotifies);
		pendingNotifies.clear();
		pendingNotifies.reserve(MAX_PENDING_NOTIFIES);

		pendingNotifyMinAddr1 = 0xFFFFFFFF;
		pendingNotifyMaxAddr1 = 0;
		pendingNotifyMinAddr2 = 0xFFFFFFFF;
		pendingNotifyMaxAddr2 = 0;
	}

	for (const auto &info : thisBatch) {
		if (info.copySrc != 0) {
			char tagData[128];
			size_t tagSize = FormatMemWriteTagAtNoFlush(tagData, sizeof(tagData), info.tag, info.copySrc, info.size);
			writeMap.Mark(info.start, info.size, info.ticks, info.pc, true, tagData);
			continue;
		}

		if (info.flags & MemBlockFlags::ALLOC) {
			allocMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		} else if (info.flags & MemBlockFlags::FREE) {
			// Maintain the previous allocation tag for debugging.
			allocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);
			suballocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);
		}
		if (info.flags & MemBlockFlags::SUB_ALLOC) {
			suballocMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		} else if (info.flags & MemBlockFlags::SUB_FREE) {
			// Maintain the previous allocation tag for debugging.
			suballocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);
		}
		if (info.flags & MemBlockFlags::TEXTURE) {
			textureMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		}
		if (info.flags & MemBlockFlags::WRITE) {
			writeMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);
		}
	}
}

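// Folds mirrored addresses onto one canonical range: the 0x3FFFFFFF mask
// strips the uncached/kernel address bits, and addresses in the 0x04000000
// (VRAM) region are further masked so its mirrors map to the same range.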
static inline uint32_t NormalizeAddress(uint32_t addr) {
	if ((addr & 0x3F000000) == 0x04000000)
		return addr & 0x041FFFFF;
	return addr & 0x3FFFFFFF;
}

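// Checks the last few queued notifies for an entry at the same start with an
// equal-or-smaller size (e.g. a write notified repeatedly as it grows); if
// found, it is updated in place instead of queueing a duplicate. The caller
// must hold pendingWriteMutex.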
static inline bool MergeRecentMemInfo(const PendingNotifyMem &info, size_t copyLength) {
	if (pendingNotifies.size() < 4)
		return false;

	for (size_t i = 1; i <= 4; ++i) {
		auto &prev = pendingNotifies[pendingNotifies.size() - i];
		if (prev.copySrc != 0)
			return false;

		if (prev.flags != info.flags)
			continue;

		if (prev.start >= info.start + info.size || prev.start + prev.size <= info.start)
			continue;

		// This means there's overlap, but not a match, so we can't combine any.
		if (prev.start != info.start || prev.size > info.size)
			return false;

		memcpy(prev.tag, info.tag, copyLength + 1);
		prev.size = info.size;
		prev.ticks = info.ticks;
		prev.pc = info.pc;
		return true;
	}

	return false;
}

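// Records a memory event with an explicit PC. Pure READ events, and small
// events when detailed info is off, skip the queue entirely; queued events
// wake the flush thread once the queue passes MAX_PENDING_NOTIFIES_THREAD.
// Memchecks fire immediately either way, unless SKIP_MEMCHECK is set.
//
// Hypothetical call site, tagging a 16-byte write so the debugger can later
// attribute it via FindMemInfo():
//
//   NotifyMemInfoPC(MemBlockFlags::WRITE, destAddr, 16, pc, "DMA", 3);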
void NotifyMemInfoPC(MemBlockFlags flags, uint32_t start, uint32_t size, uint32_t pc, const char *tagStr, size_t strLength) {
	if (size == 0) {
		return;
	}
	// Clear the uncached and kernel bits.
	start = NormalizeAddress(start);

	bool needFlush = false;
	// When the setting is off, we skip smaller info to keep things fast.
	if (MemBlockInfoDetailed(size) && flags != MemBlockFlags::READ) {
		PendingNotifyMem info{ flags, start, size };
		info.ticks = CoreTiming::GetTicks();
		info.pc = pc;

		size_t copyLength = strLength;
		if (copyLength >= sizeof(info.tag)) {
			copyLength = sizeof(info.tag) - 1;
		}
		memcpy(info.tag, tagStr, copyLength);
		info.tag[copyLength] = 0;

		std::lock_guard<std::mutex> guard(pendingWriteMutex);
		// Sometimes we get duplicates, quickly check.
		if (!MergeRecentMemInfo(info, copyLength)) {
			if (start < 0x08000000) {
				pendingNotifyMinAddr1 = std::min(pendingNotifyMinAddr1.load(), start);
				pendingNotifyMaxAddr1 = std::max(pendingNotifyMaxAddr1.load(), start + size);
			} else {
				pendingNotifyMinAddr2 = std::min(pendingNotifyMinAddr2.load(), start);
				pendingNotifyMaxAddr2 = std::max(pendingNotifyMaxAddr2.load(), start + size);
			}
			pendingNotifies.push_back(info);
		}
		needFlush = pendingNotifies.size() > MAX_PENDING_NOTIFIES_THREAD;
	}

	if (needFlush) {
		{
			std::lock_guard<std::mutex> guard(flushLock);
			flushThreadPending = true;
		}
		flushCond.notify_one();
	}

	if (!(flags & MemBlockFlags::SKIP_MEMCHECK)) {
		if (flags & MemBlockFlags::WRITE) {
			CBreakPoints::ExecMemCheck(start, true, size, pc, tagStr);
		} else if (flags & MemBlockFlags::READ) {
			CBreakPoints::ExecMemCheck(start, false, size, pc, tagStr);
		}
	}
}

void NotifyMemInfo(MemBlockFlags flags, uint32_t start, uint32_t size, const char *str, size_t strLength) {
	NotifyMemInfoPC(flags, start, size, currentMIPS->pc, str, strLength);
}

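// Copy variant: when no memchecks are active, the copy is queued with just
// the prefix and copySrc, deferring the (potentially expensive) source-tag
// lookup to flush time. With memchecks it formats and notifies immediately.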
void NotifyMemInfoCopy(uint32_t destPtr, uint32_t srcPtr, uint32_t size, const char *prefix) {
	if (size == 0)
		return;

	bool needsFlush = false;
	if (CBreakPoints::HasMemChecks()) {
		// This will cause a flush, but it's needed to trigger memchecks with proper data.
		char tagData[128];
		size_t tagSize = FormatMemWriteTagAt(tagData, sizeof(tagData), prefix, srcPtr, size);
		NotifyMemInfo(MemBlockFlags::READ, srcPtr, size, tagData, tagSize);
		NotifyMemInfo(MemBlockFlags::WRITE, destPtr, size, tagData, tagSize);
	} else if (MemBlockInfoDetailed(size)) {
		srcPtr = NormalizeAddress(srcPtr);
		destPtr = NormalizeAddress(destPtr);

		PendingNotifyMem info{ MemBlockFlags::WRITE, destPtr, size };
		info.copySrc = srcPtr;
		info.ticks = CoreTiming::GetTicks();
		info.pc = currentMIPS->pc;

		// Store the prefix for now. The correct tag will be calculated on flush.
		truncate_cpy(info.tag, prefix);

		std::lock_guard<std::mutex> guard(pendingWriteMutex);
		if (destPtr < 0x08000000) {
			pendingNotifyMinAddr1 = std::min(pendingNotifyMinAddr1.load(), destPtr);
			pendingNotifyMaxAddr1 = std::max(pendingNotifyMaxAddr1.load(), destPtr + size);
		} else {
			pendingNotifyMinAddr2 = std::min(pendingNotifyMinAddr2.load(), destPtr);
			pendingNotifyMaxAddr2 = std::max(pendingNotifyMaxAddr2.load(), destPtr + size);
		}
		pendingNotifies.push_back(info);
		needsFlush = pendingNotifies.size() > MAX_PENDING_NOTIFIES_THREAD;
	}

	if (needsFlush) {
		{
			std::lock_guard<std::mutex> guard(flushLock);
			flushThreadPending = true;
		}
		flushCond.notify_one();
	}
}

std::vector<MemBlockInfo> FindMemInfo(uint32_t start, uint32_t size) {
	start = NormalizeAddress(start);

	if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)
		FlushPendingMemInfo();
	if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)
		FlushPendingMemInfo();

	std::vector<MemBlockInfo> results;
	allocMap.Find(MemBlockFlags::ALLOC, start, size, results);
	suballocMap.Find(MemBlockFlags::SUB_ALLOC, start, size, results);
	writeMap.Find(MemBlockFlags::WRITE, start, size, results);
	textureMap.Find(MemBlockFlags::TEXTURE, start, size, results);
	return results;
}

std::vector<MemBlockInfo> FindMemInfoByFlag(MemBlockFlags flags, uint32_t start, uint32_t size) {
	start = NormalizeAddress(start);

	if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)
		FlushPendingMemInfo();
	if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)
		FlushPendingMemInfo();

	std::vector<MemBlockInfo> results;
	if (flags & MemBlockFlags::ALLOC)
		allocMap.Find(MemBlockFlags::ALLOC, start, size, results);
	if (flags & MemBlockFlags::SUB_ALLOC)
		suballocMap.Find(MemBlockFlags::SUB_ALLOC, start, size, results);
	if (flags & MemBlockFlags::WRITE)
		writeMap.Find(MemBlockFlags::WRITE, start, size, results);
	if (flags & MemBlockFlags::TEXTURE)
		textureMap.Find(MemBlockFlags::TEXTURE, start, size, results);
	return results;
}

static const char *FindWriteTagByFlag(MemBlockFlags flags, uint32_t start, uint32_t size, bool flush = true) {
	start = NormalizeAddress(start);

	if (flush) {
		if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)
			FlushPendingMemInfo();
		if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)
			FlushPendingMemInfo();
	}

	if (flags & MemBlockFlags::ALLOC) {
		const char *tag = allocMap.FastFindWriteTag(MemBlockFlags::ALLOC, start, size);
		if (tag)
			return tag;
	}
	if (flags & MemBlockFlags::SUB_ALLOC) {
		const char *tag = suballocMap.FastFindWriteTag(MemBlockFlags::SUB_ALLOC, start, size);
		if (tag)
			return tag;
	}
	if (flags & MemBlockFlags::WRITE) {
		const char *tag = writeMap.FastFindWriteTag(MemBlockFlags::WRITE, start, size);
		if (tag)
			return tag;
	}
	if (flags & MemBlockFlags::TEXTURE) {
		const char *tag = textureMap.FastFindWriteTag(MemBlockFlags::TEXTURE, start, size);
		if (tag)
			return tag;
	}
	return nullptr;
}

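// Formats "prefix + tag of last writer" for [start, start + size), preferring
// write tags over alloc/texture tags and skipping the generic "MemInit" tag;
// falls back to "prefix + address_size" when nothing better is known.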
size_t FormatMemWriteTagAt(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size) {
	const char *tag = FindWriteTagByFlag(MemBlockFlags::WRITE, start, size);
	if (tag && strcmp(tag, "MemInit") != 0) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	// Fall back to alloc and texture, especially for VRAM. We prefer write above.
	tag = FindWriteTagByFlag(MemBlockFlags::ALLOC | MemBlockFlags::TEXTURE, start, size);
	if (tag) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	return snprintf(buf, sz, "%s%08x_size_%08x", prefix, start, size);
}

size_t FormatMemWriteTagAtNoFlush(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size) {
	const char *tag = FindWriteTagByFlag(MemBlockFlags::WRITE, start, size, false);
	if (tag && strcmp(tag, "MemInit") != 0) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	// Fall back to alloc and texture, especially for VRAM. We prefer write above.
	tag = FindWriteTagByFlag(MemBlockFlags::ALLOC | MemBlockFlags::TEXTURE, start, size, false);
	if (tag) {
		return snprintf(buf, sz, "%s%s", prefix, tag);
	}
	return snprintf(buf, sz, "%s%08x_size_%08x", prefix, start, size);
}

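// Background thread that applies queued notifies. It waits on flushCond and
// is woken when the queue grows past MAX_PENDING_NOTIFIES_THREAD, or at
// shutdown (which also sets flushThreadPending so the wait can exit).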
static void FlushMemInfoThread() {
	SetCurrentThreadName("FlushMemInfo");

	while (flushThreadRunning.load()) {
		flushThreadPending = false;
		FlushPendingMemInfo();

		std::unique_lock<std::mutex> guard(flushLock);
		flushCond.wait(guard, [] {
			return flushThreadPending.load();
		});
	}
}

void MemBlockInfoInit() {
	std::lock_guard<std::mutex> guard(pendingReadMutex);
	std::lock_guard<std::mutex> guardW(pendingWriteMutex);
	pendingNotifies.reserve(MAX_PENDING_NOTIFIES);
	pendingNotifyMinAddr1 = 0xFFFFFFFF;
	pendingNotifyMaxAddr1 = 0;
	pendingNotifyMinAddr2 = 0xFFFFFFFF;
	pendingNotifyMaxAddr2 = 0;

	flushThreadRunning = true;
	flushThreadPending = false;
	flushThread = std::thread(&FlushMemInfoThread);
}

void MemBlockInfoShutdown() {
	{
		std::lock_guard<std::mutex> guard(pendingReadMutex);
		std::lock_guard<std::mutex> guardW(pendingWriteMutex);
		allocMap.Reset();
		suballocMap.Reset();
		writeMap.Reset();
		textureMap.Reset();
		pendingNotifies.clear();
	}

	if (flushThreadRunning.load()) {
		std::lock_guard<std::mutex> guard(flushLock);
		flushThreadRunning = false;
		flushThreadPending = true;
	}
	flushCond.notify_one();
	flushThread.join();
}

void MemBlockInfoDoState(PointerWrap &p) {
	auto s = p.Section("MemBlockInfo", 0, 1);
	if (!s)
		return;

	FlushPendingMemInfo();
	allocMap.DoState(p);
	suballocMap.DoState(p);
	writeMap.DoState(p);
	textureMap.DoState(p);
}

// Used by the debugger.
void MemBlockOverrideDetailed() {
	detailedOverride++;
}

void MemBlockReleaseDetailed() {
	detailedOverride--;
}

bool MemBlockInfoDetailed() {
	return g_Config.bDebugMemInfoDetailed || detailedOverride != 0;
}