CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!
Path: blob/master/Core/Debugger/MemBlockInfo.cpp
Views: 1401
// Copyright (c) 2021- PPSSPP Project.12// This program is free software: you can redistribute it and/or modify3// it under the terms of the GNU General Public License as published by4// the Free Software Foundation, version 2.0 or later versions.56// This program is distributed in the hope that it will be useful,7// but WITHOUT ANY WARRANTY; without even the implied warranty of8// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the9// GNU General Public License 2.0 for more details.1011// A copy of the GPL 2.0 should have been included with the program.12// If not, see http://www.gnu.org/licenses/1314// Official git repository and contact information can be found at15// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.1617#include <algorithm>18#include <atomic>19#include <condition_variable>20#include <cstring>21#include <mutex>22#include <thread>2324#include "Common/Log.h"25#include "Common/Serialize/Serializer.h"26#include "Common/Serialize/SerializeFuncs.h"27#include "Common/Thread/ThreadUtil.h"28#include "Core/Config.h"29#include "Core/CoreTiming.h"30#include "Core/Debugger/Breakpoints.h"31#include "Core/Debugger/MemBlockInfo.h"32#include "Core/MIPS/MIPS.h"33#include "Common/StringUtils.h"3435class MemSlabMap {36public:37MemSlabMap();38~MemSlabMap();3940bool Mark(uint32_t addr, uint32_t size, uint64_t ticks, uint32_t pc, bool allocated, const char *tag);41bool Find(MemBlockFlags flags, uint32_t addr, uint32_t size, std::vector<MemBlockInfo> &results);42// Note that the returned pointer gets invalidated as soon as Mark is called.43const char *FastFindWriteTag(MemBlockFlags flags, uint32_t addr, uint32_t size);44void Reset();45void DoState(PointerWrap &p);4647private:48struct Slab {49uint32_t start = 0;50uint32_t end = 0;51uint64_t ticks = 0;52uint32_t pc = 0;53bool allocated = false;54// Intentionally not save stated.55bool bulkStorage = false;56char tag[128]{};57Slab *prev = nullptr;58Slab *next = nullptr;5960void DoState(PointerWrap 
&p);61};6263static constexpr uint32_t MAX_SIZE = 0x40000000;64static constexpr uint32_t SLICES = 65536;65static constexpr uint32_t SLICE_SIZE = MAX_SIZE / SLICES;6667Slab *FindSlab(uint32_t addr);68void Clear();69// Returns the new slab after size.70Slab *Split(Slab *slab, uint32_t size);71void MergeAdjacent(Slab *slab);72static inline bool Same(const Slab *a, const Slab *b);73void Merge(Slab *a, Slab *b);74void FillHeads(Slab *slab);7576Slab *first_ = nullptr;77Slab *lastFind_ = nullptr;78std::vector<Slab *> heads_;79Slab *bulkStorage_ = nullptr;80};8182struct PendingNotifyMem {83MemBlockFlags flags;84uint32_t start;85uint32_t size;86uint32_t copySrc;87uint64_t ticks;88uint32_t pc;89char tag[128];90};9192// 160 KB.93static constexpr size_t MAX_PENDING_NOTIFIES = 1024;94static constexpr size_t MAX_PENDING_NOTIFIES_THREAD = 1000;95static MemSlabMap allocMap;96static MemSlabMap suballocMap;97static MemSlabMap writeMap;98static MemSlabMap textureMap;99static std::vector<PendingNotifyMem> pendingNotifies;100static std::atomic<uint32_t> pendingNotifyMinAddr1;101static std::atomic<uint32_t> pendingNotifyMaxAddr1;102static std::atomic<uint32_t> pendingNotifyMinAddr2;103static std::atomic<uint32_t> pendingNotifyMaxAddr2;104// To prevent deadlocks, acquire Read before Write if you're going to acquire both.105static std::mutex pendingWriteMutex;106static std::mutex pendingReadMutex;107static int detailedOverride;108109static std::thread flushThread;110static std::atomic<bool> flushThreadRunning;111static std::atomic<bool> flushThreadPending;112static std::mutex flushLock;113static std::condition_variable flushCond;114115MemSlabMap::MemSlabMap() {116Reset();117}118119MemSlabMap::~MemSlabMap() {120Clear();121}122123bool MemSlabMap::Mark(uint32_t addr, uint32_t size, uint64_t ticks, uint32_t pc, bool allocated, const char *tag) {124uint32_t end = addr + size;125Slab *slab = FindSlab(addr);126Slab *firstMatch = nullptr;127while (slab != nullptr && slab->start < end) {128if 
(slab->start < addr)129slab = Split(slab, addr - slab->start);130// Don't replace slab, the return is the after part.131if (slab->end > end) {132Split(slab, end - slab->start);133}134135slab->allocated = allocated;136if (pc != 0) {137slab->ticks = ticks;138slab->pc = pc;139}140if (tag)141truncate_cpy(slab->tag, tag);142143// Move on to the next one.144if (firstMatch == nullptr)145firstMatch = slab;146slab = slab->next;147}148149if (firstMatch != nullptr) {150// This will merge all those blocks to one.151MergeAdjacent(firstMatch);152return true;153}154return false;155}156157bool MemSlabMap::Find(MemBlockFlags flags, uint32_t addr, uint32_t size, std::vector<MemBlockInfo> &results) {158uint32_t end = addr + size;159Slab *slab = FindSlab(addr);160bool found = false;161while (slab != nullptr && slab->start < end) {162if (slab->pc != 0 || slab->tag[0] != '\0') {163results.push_back({ flags, slab->start, slab->end - slab->start, slab->ticks, slab->pc, slab->tag, slab->allocated });164found = true;165}166slab = slab->next;167}168return found;169}170171const char *MemSlabMap::FastFindWriteTag(MemBlockFlags flags, uint32_t addr, uint32_t size) {172uint32_t end = addr + size;173Slab *slab = FindSlab(addr);174while (slab != nullptr && slab->start < end) {175if (slab->pc != 0 || slab->tag[0] != '\0') {176return slab->tag;177}178slab = slab->next;179}180return nullptr;181}182183void MemSlabMap::Reset() {184Clear();185186first_ = new Slab();187first_->end = MAX_SIZE;188lastFind_ = first_;189190heads_.resize(SLICES, first_);191}192193void MemSlabMap::DoState(PointerWrap &p) {194auto s = p.Section("MemSlabMap", 1);195if (!s)196return;197198int count = 0;199if (p.mode == p.MODE_READ) {200// Since heads_ is a static size, let's avoid clearing it.201// This helps in case a debugger call happens concurrently.202Slab *old = first_;203Slab *oldBulk = bulkStorage_;204Do(p, count);205206first_ = new Slab();207first_->DoState(p);208lastFind_ = 
first_;209--count;210211FillHeads(first_);212213bulkStorage_ = new Slab[count];214215Slab *slab = first_;216for (int i = 0; i < count; ++i) {217slab->next = &bulkStorage_[i];218slab->next->bulkStorage = true;219slab->next->DoState(p);220221slab->next->prev = slab;222slab = slab->next;223224FillHeads(slab);225}226227// Now that it's entirely disconnected, delete the old slabs.228while (old != nullptr) {229Slab *next = old->next;230if (!old->bulkStorage)231delete old;232old = next;233}234delete [] oldBulk;235} else {236for (Slab *slab = first_; slab != nullptr; slab = slab->next)237++count;238Do(p, count);239240first_->DoState(p);241--count;242243Slab *slab = first_;244for (int i = 0; i < count; ++i) {245slab->next->DoState(p);246slab = slab->next;247}248}249}250251void MemSlabMap::Slab::DoState(PointerWrap &p) {252auto s = p.Section("MemSlabMapSlab", 1, 3);253if (!s)254return;255256Do(p, start);257Do(p, end);258Do(p, ticks);259Do(p, pc);260Do(p, allocated);261if (s >= 3) {262Do(p, tag);263} else if (s >= 2) {264char shortTag[32];265Do(p, shortTag);266memcpy(tag, shortTag, sizeof(shortTag));267} else {268std::string stringTag;269Do(p, stringTag);270truncate_cpy(tag, stringTag.c_str());271}272}273274void MemSlabMap::Clear() {275Slab *s = first_;276while (s != nullptr) {277Slab *next = s->next;278if (!s->bulkStorage)279delete s;280s = next;281}282delete [] bulkStorage_;283bulkStorage_ = nullptr;284first_ = nullptr;285lastFind_ = nullptr;286heads_.clear();287}288289MemSlabMap::Slab *MemSlabMap::FindSlab(uint32_t addr) {290// Jump ahead using our index.291Slab *slab = heads_[addr / SLICE_SIZE];292// We often move forward, so check the last find.293if (lastFind_->start > slab->start && lastFind_->start <= addr)294slab = lastFind_;295296while (slab != nullptr && slab->start <= addr) {297if (slab->end > addr) {298lastFind_ = slab;299return slab;300}301slab = slab->next;302}303return nullptr;304}305306MemSlabMap::Slab *MemSlabMap::Split(Slab *slab, uint32_t size) {307Slab 
*next = new Slab();308next->start = slab->start + size;309next->end = slab->end;310next->ticks = slab->ticks;311next->pc = slab->pc;312next->allocated = slab->allocated;313truncate_cpy(next->tag, slab->tag);314next->prev = slab;315next->next = slab->next;316317slab->next = next;318if (next->next)319next->next->prev = next;320321// If the split is big, we might have to update our index.322FillHeads(next);323324slab->end = slab->start + size;325return next;326}327328bool MemSlabMap::Same(const Slab *a, const Slab *b) {329if (a->allocated != b->allocated)330return false;331if (a->pc != b->pc)332return false;333if (strcmp(a->tag, b->tag))334return false;335return true;336}337338void MemSlabMap::MergeAdjacent(Slab *slab) {339while (slab->next != nullptr && Same(slab, slab->next)) {340Merge(slab, slab->next);341}342while (slab->prev != nullptr && Same(slab, slab->prev)) {343Merge(slab, slab->prev);344}345}346347void MemSlabMap::Merge(Slab *a, Slab *b) {348if (a->next == b) {349_assert_(a->end == b->start);350a->end = b->end;351a->next = b->next;352353if (a->next)354a->next->prev = a;355} else if (a->prev == b) {356_assert_(b->end == a->start);357a->start = b->start;358a->prev = b->prev;359360if (a->prev)361a->prev->next = a;362else if (first_ == b)363first_ = a;364} else {365_assert_(false);366}367// Take over index entries b had.368FillHeads(a);369if (b->ticks > a->ticks) {370a->ticks = b->ticks;371// In case we ignore PC for same.372a->pc = b->pc;373}374if (lastFind_ == b)375lastFind_ = a;376if (!b->bulkStorage)377delete b;378}379380void MemSlabMap::FillHeads(Slab *slab) {381uint32_t slice = slab->start / SLICE_SIZE;382uint32_t endSlice = (slab->end - 1) / SLICE_SIZE;383384// For the first slice, only replace if it's the one we're removing.385if (slab->start == slice * SLICE_SIZE) {386heads_[slice] = slab;387}388389// Now replace all the rest - we definitely cover the start of them.390Slab **next = &heads_[slice + 1];391// We want to set slice + 1 through endSlice, 
inclusive.392size_t c = endSlice - slice;393for (size_t i = 0; i < c; ++i) {394next[i] = slab;395}396}397398size_t FormatMemWriteTagAtNoFlush(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size);399400void FlushPendingMemInfo() {401// This lock prevents us from another thread reading while we're busy flushing.402std::lock_guard<std::mutex> guard(pendingReadMutex);403std::vector<PendingNotifyMem> thisBatch;404{405std::lock_guard<std::mutex> guard(pendingWriteMutex);406thisBatch = std::move(pendingNotifies);407pendingNotifies.clear();408pendingNotifies.reserve(MAX_PENDING_NOTIFIES);409410pendingNotifyMinAddr1 = 0xFFFFFFFF;411pendingNotifyMaxAddr1 = 0;412pendingNotifyMinAddr2 = 0xFFFFFFFF;413pendingNotifyMaxAddr2 = 0;414}415416for (const auto &info : thisBatch) {417if (info.copySrc != 0) {418char tagData[128];419size_t tagSize = FormatMemWriteTagAtNoFlush(tagData, sizeof(tagData), info.tag, info.copySrc, info.size);420writeMap.Mark(info.start, info.size, info.ticks, info.pc, true, tagData);421continue;422}423424if (info.flags & MemBlockFlags::ALLOC) {425allocMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);426} else if (info.flags & MemBlockFlags::FREE) {427// Maintain the previous allocation tag for debugging.428allocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);429suballocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);430}431if (info.flags & MemBlockFlags::SUB_ALLOC) {432suballocMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);433} else if (info.flags & MemBlockFlags::SUB_FREE) {434// Maintain the previous allocation tag for debugging.435suballocMap.Mark(info.start, info.size, info.ticks, 0, false, nullptr);436}437if (info.flags & MemBlockFlags::TEXTURE) {438textureMap.Mark(info.start, info.size, info.ticks, info.pc, true, info.tag);439}440if (info.flags & MemBlockFlags::WRITE) {441writeMap.Mark(info.start, info.size, info.ticks, info.pc, true, 
info.tag);442}443}444}445446static inline uint32_t NormalizeAddress(uint32_t addr) {447if ((addr & 0x3F000000) == 0x04000000)448return addr & 0x041FFFFF;449return addr & 0x3FFFFFFF;450}451452static inline bool MergeRecentMemInfo(const PendingNotifyMem &info, size_t copyLength) {453if (pendingNotifies.size() < 4)454return false;455456for (size_t i = 1; i <= 4; ++i) {457auto &prev = pendingNotifies[pendingNotifies.size() - i];458if (prev.copySrc != 0)459return false;460461if (prev.flags != info.flags)462continue;463464if (prev.start >= info.start + info.size || prev.start + prev.size <= info.start)465continue;466467// This means there's overlap, but not a match, so we can't combine any.468if (prev.start != info.start || prev.size > info.size)469return false;470471memcpy(prev.tag, info.tag, copyLength + 1);472prev.size = info.size;473prev.ticks = info.ticks;474prev.pc = info.pc;475return true;476}477478return false;479}480481void NotifyMemInfoPC(MemBlockFlags flags, uint32_t start, uint32_t size, uint32_t pc, const char *tagStr, size_t strLength) {482if (size == 0) {483return;484}485// Clear the uncached and kernel bits.486start = NormalizeAddress(start);487488bool needFlush = false;489// When the setting is off, we skip smaller info to keep things fast.490if (MemBlockInfoDetailed(size) && flags != MemBlockFlags::READ) {491PendingNotifyMem info{ flags, start, size };492info.ticks = CoreTiming::GetTicks();493info.pc = pc;494495size_t copyLength = strLength;496if (copyLength >= sizeof(info.tag)) {497copyLength = sizeof(info.tag) - 1;498}499memcpy(info.tag, tagStr, copyLength);500info.tag[copyLength] = 0;501502std::lock_guard<std::mutex> guard(pendingWriteMutex);503// Sometimes we get duplicates, quickly check.504if (!MergeRecentMemInfo(info, copyLength)) {505if (start < 0x08000000) {506pendingNotifyMinAddr1 = std::min(pendingNotifyMinAddr1.load(), start);507pendingNotifyMaxAddr1 = std::max(pendingNotifyMaxAddr1.load(), start + size);508} else {509pendingNotifyMinAddr2 = 
std::min(pendingNotifyMinAddr2.load(), start);510pendingNotifyMaxAddr2 = std::max(pendingNotifyMaxAddr2.load(), start + size);511}512pendingNotifies.push_back(info);513}514needFlush = pendingNotifies.size() > MAX_PENDING_NOTIFIES_THREAD;515}516517if (needFlush) {518{519std::lock_guard<std::mutex> guard(flushLock);520flushThreadPending = true;521}522flushCond.notify_one();523}524525if (!(flags & MemBlockFlags::SKIP_MEMCHECK)) {526if (flags & MemBlockFlags::WRITE) {527CBreakPoints::ExecMemCheck(start, true, size, pc, tagStr);528} else if (flags & MemBlockFlags::READ) {529CBreakPoints::ExecMemCheck(start, false, size, pc, tagStr);530}531}532}533534void NotifyMemInfo(MemBlockFlags flags, uint32_t start, uint32_t size, const char *str, size_t strLength) {535NotifyMemInfoPC(flags, start, size, currentMIPS->pc, str, strLength);536}537538void NotifyMemInfoCopy(uint32_t destPtr, uint32_t srcPtr, uint32_t size, const char *prefix) {539if (size == 0)540return;541542bool needsFlush = false;543if (CBreakPoints::HasMemChecks()) {544// This will cause a flush, but it's needed to trigger memchecks with proper data.545char tagData[128];546size_t tagSize = FormatMemWriteTagAt(tagData, sizeof(tagData), prefix, srcPtr, size);547NotifyMemInfo(MemBlockFlags::READ, srcPtr, size, tagData, tagSize);548NotifyMemInfo(MemBlockFlags::WRITE, destPtr, size, tagData, tagSize);549} else if (MemBlockInfoDetailed(size)) {550srcPtr = NormalizeAddress(srcPtr);551destPtr = NormalizeAddress(destPtr);552553PendingNotifyMem info{ MemBlockFlags::WRITE, destPtr, size };554info.copySrc = srcPtr;555info.ticks = CoreTiming::GetTicks();556info.pc = currentMIPS->pc;557558// Store the prefix for now. 
The correct tag will be calculated on flush.559truncate_cpy(info.tag, prefix);560561std::lock_guard<std::mutex> guard(pendingWriteMutex);562if (destPtr < 0x08000000) {563pendingNotifyMinAddr1 = std::min(pendingNotifyMinAddr1.load(), destPtr);564pendingNotifyMaxAddr1 = std::max(pendingNotifyMaxAddr1.load(), destPtr + size);565} else {566pendingNotifyMinAddr2 = std::min(pendingNotifyMinAddr2.load(), destPtr);567pendingNotifyMaxAddr2 = std::max(pendingNotifyMaxAddr2.load(), destPtr + size);568}569pendingNotifies.push_back(info);570needsFlush = pendingNotifies.size() > MAX_PENDING_NOTIFIES_THREAD;571}572573if (needsFlush) {574{575std::lock_guard<std::mutex> guard(flushLock);576flushThreadPending = true;577}578flushCond.notify_one();579}580}581582std::vector<MemBlockInfo> FindMemInfo(uint32_t start, uint32_t size) {583start = NormalizeAddress(start);584585if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)586FlushPendingMemInfo();587if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)588FlushPendingMemInfo();589590std::vector<MemBlockInfo> results;591allocMap.Find(MemBlockFlags::ALLOC, start, size, results);592suballocMap.Find(MemBlockFlags::SUB_ALLOC, start, size, results);593writeMap.Find(MemBlockFlags::WRITE, start, size, results);594textureMap.Find(MemBlockFlags::TEXTURE, start, size, results);595return results;596}597598std::vector<MemBlockInfo> FindMemInfoByFlag(MemBlockFlags flags, uint32_t start, uint32_t size) {599start = NormalizeAddress(start);600601if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)602FlushPendingMemInfo();603if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)604FlushPendingMemInfo();605606std::vector<MemBlockInfo> results;607if (flags & MemBlockFlags::ALLOC)608allocMap.Find(MemBlockFlags::ALLOC, start, size, results);609if (flags & MemBlockFlags::SUB_ALLOC)610suballocMap.Find(MemBlockFlags::SUB_ALLOC, start, size, results);611if (flags & 
MemBlockFlags::WRITE)612writeMap.Find(MemBlockFlags::WRITE, start, size, results);613if (flags & MemBlockFlags::TEXTURE)614textureMap.Find(MemBlockFlags::TEXTURE, start, size, results);615return results;616}617618static const char *FindWriteTagByFlag(MemBlockFlags flags, uint32_t start, uint32_t size, bool flush = true) {619start = NormalizeAddress(start);620621if (flush) {622if (pendingNotifyMinAddr1 < start + size && pendingNotifyMaxAddr1 >= start)623FlushPendingMemInfo();624if (pendingNotifyMinAddr2 < start + size && pendingNotifyMaxAddr2 >= start)625FlushPendingMemInfo();626}627628if (flags & MemBlockFlags::ALLOC) {629const char *tag = allocMap.FastFindWriteTag(MemBlockFlags::ALLOC, start, size);630if (tag)631return tag;632}633if (flags & MemBlockFlags::SUB_ALLOC) {634const char *tag = suballocMap.FastFindWriteTag(MemBlockFlags::SUB_ALLOC, start, size);635if (tag)636return tag;637}638if (flags & MemBlockFlags::WRITE) {639const char *tag = writeMap.FastFindWriteTag(MemBlockFlags::WRITE, start, size);640if (tag)641return tag;642}643if (flags & MemBlockFlags::TEXTURE) {644const char *tag = textureMap.FastFindWriteTag(MemBlockFlags::TEXTURE, start, size);645if (tag)646return tag;647}648return nullptr;649}650651size_t FormatMemWriteTagAt(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size) {652const char *tag = FindWriteTagByFlag(MemBlockFlags::WRITE, start, size);653if (tag && strcmp(tag, "MemInit") != 0) {654return snprintf(buf, sz, "%s%s", prefix, tag);655}656// Fall back to alloc and texture, especially for VRAM. 
We prefer write above.657tag = FindWriteTagByFlag(MemBlockFlags::ALLOC | MemBlockFlags::TEXTURE, start, size);658if (tag) {659return snprintf(buf, sz, "%s%s", prefix, tag);660}661return snprintf(buf, sz, "%s%08x_size_%08x", prefix, start, size);662}663664size_t FormatMemWriteTagAtNoFlush(char *buf, size_t sz, const char *prefix, uint32_t start, uint32_t size) {665const char *tag = FindWriteTagByFlag(MemBlockFlags::WRITE, start, size, false);666if (tag && strcmp(tag, "MemInit") != 0) {667return snprintf(buf, sz, "%s%s", prefix, tag);668}669// Fall back to alloc and texture, especially for VRAM. We prefer write above.670tag = FindWriteTagByFlag(MemBlockFlags::ALLOC | MemBlockFlags::TEXTURE, start, size, false);671if (tag) {672return snprintf(buf, sz, "%s%s", prefix, tag);673}674return snprintf(buf, sz, "%s%08x_size_%08x", prefix, start, size);675}676677static void FlushMemInfoThread() {678SetCurrentThreadName("FlushMemInfo");679680while (flushThreadRunning.load()) {681flushThreadPending = false;682FlushPendingMemInfo();683684std::unique_lock<std::mutex> guard(flushLock);685flushCond.wait(guard, [] {686return flushThreadPending.load();687});688}689}690691void MemBlockInfoInit() {692std::lock_guard<std::mutex> guard(pendingReadMutex);693std::lock_guard<std::mutex> guardW(pendingWriteMutex);694pendingNotifies.reserve(MAX_PENDING_NOTIFIES);695pendingNotifyMinAddr1 = 0xFFFFFFFF;696pendingNotifyMaxAddr1 = 0;697pendingNotifyMinAddr2 = 0xFFFFFFFF;698pendingNotifyMaxAddr2 = 0;699700flushThreadRunning = true;701flushThreadPending = false;702flushThread = std::thread(&FlushMemInfoThread);703}704705void MemBlockInfoShutdown() {706{707std::lock_guard<std::mutex> guard(pendingReadMutex);708std::lock_guard<std::mutex> guardW(pendingWriteMutex);709allocMap.Reset();710suballocMap.Reset();711writeMap.Reset();712textureMap.Reset();713pendingNotifies.clear();714}715716if (flushThreadRunning.load()) {717std::lock_guard<std::mutex> guard(flushLock);718flushThreadRunning = 
false;719flushThreadPending = true;720}721flushCond.notify_one();722flushThread.join();723}724725void MemBlockInfoDoState(PointerWrap &p) {726auto s = p.Section("MemBlockInfo", 0, 1);727if (!s)728return;729730FlushPendingMemInfo();731allocMap.DoState(p);732suballocMap.DoState(p);733writeMap.DoState(p);734textureMap.DoState(p);735}736737// Used by the debugger.738void MemBlockOverrideDetailed() {739detailedOverride++;740}741742void MemBlockReleaseDetailed() {743detailedOverride--;744}745746bool MemBlockInfoDetailed() {747return g_Config.bDebugMemInfoDetailed || detailedOverride != 0;748}749750751