GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/FileLoaders/CachingFileLoader.cpp

// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <cstring>
#include <thread>
#include <algorithm>

#include "Common/Thread/ThreadUtil.h"
#include "Common/TimeUtil.h"
#include "Core/FileLoaders/CachingFileLoader.h"

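// The cache operates on fixed-size blocks. BLOCK_SIZE, BLOCK_SHIFT,
// MAX_BLOCKS_PER_READ, MAX_BLOCKS_CACHED, and BLOCK_READAHEAD are defined in
// CachingFileLoader.h; the code below relies on BLOCK_SIZE == 1 << BLOCK_SHIFT,
// so (pos >> BLOCK_SHIFT) maps a byte offset to its block index.
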
// Takes ownership of backend.
CachingFileLoader::CachingFileLoader(FileLoader *backend)
	: ProxiedFileLoader(backend) {
}

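// Hypothetical usage sketch (the backend construction below is an assumption
// for illustration; any concrete FileLoader would do):
//
//   FileLoader *loader = new CachingFileLoader(ConstructFileLoader(path));
//   u8 header[16];
//   loader->ReadAt(0, sizeof(header), header, FileLoader::Flags::NONE);
//   delete loader;  // Also releases the backend, since ownership was taken.
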
void CachingFileLoader::Prepare() {
	std::call_once(preparedFlag_, [this](){
		filesize_ = ProxiedFileLoader::FileSize();
		if (filesize_ > 0) {
			InitCache();
		}
	});
}

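// Note: std::call_once above guarantees the preparation lambda runs exactly
// once even if multiple threads race into Prepare(); later callers block
// until the first completes, so filesize_ is stable after any Prepare() call.
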
CachingFileLoader::~CachingFileLoader() {
	if (filesize_ > 0) {
		ShutdownCache();
	}
}

bool CachingFileLoader::Exists() {
	if (exists_ == -1) {
		exists_ = ProxiedFileLoader::Exists() ? 1 : 0;
	}
	return exists_ == 1;
}

bool CachingFileLoader::ExistsFast() {
	if (exists_ == -1) {
		return ProxiedFileLoader::ExistsFast();
	}
	return exists_ == 1;
}

bool CachingFileLoader::IsDirectory() {
	if (isDirectory_ == -1) {
		isDirectory_ = ProxiedFileLoader::IsDirectory() ? 1 : 0;
	}
	return isDirectory_ == 1;
}

s64 CachingFileLoader::FileSize() {
	Prepare();
	return filesize_;
}

size_t CachingFileLoader::ReadAt(s64 absolutePos, size_t bytes, void *data, Flags flags) {
	Prepare();
	if (absolutePos >= filesize_) {
		bytes = 0;
	} else if (absolutePos + (s64)bytes >= filesize_) {
		bytes = (size_t)(filesize_ - absolutePos);
	}

	size_t readSize = 0;
	if ((flags & Flags::HINT_UNCACHED) != 0) {
		readSize = backend_->ReadAt(absolutePos, bytes, data, flags);
	} else {
		readSize = ReadFromCache(absolutePos, bytes, data);
		// Loop in case the cache size is too small for the entire read.
		while (readSize < bytes) {
			SaveIntoCache(absolutePos + readSize, bytes - readSize, flags);
			size_t bytesFromCache = ReadFromCache(absolutePos + readSize, bytes - readSize, (u8 *)data + readSize);
			readSize += bytesFromCache;
			if (bytesFromCache == 0) {
				// We can't read any more.
				break;
			}
		}

		StartReadAhead(absolutePos + readSize);
	}

	return readSize;
}

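// Note: HINT_UNCACHED reads above bypass the block cache and go straight to
// the backend; all other reads are served from cached blocks, with missing
// blocks faulted in through SaveIntoCache() until the request is satisfied.
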
void CachingFileLoader::InitCache() {
	cacheSize_ = 0;
	oldestGeneration_ = 0;
	generation_ = 0;
}

void CachingFileLoader::ShutdownCache() {
	// TODO: Maybe add some hint that deletion is coming soon?
	// We can't delete while the thread is running, so have to wait.
	// This should only happen from the menu.
	while (aheadThreadRunning_) {
		sleep_ms(1);
	}
	if (aheadThread_.joinable())
		aheadThread_.join();

	std::lock_guard<std::recursive_mutex> guard(blocksMutex_);
	for (auto block : blocks_) {
		delete [] block.second.ptr;
	}
	blocks_.clear();
	cacheSize_ = 0;
}

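// Note: the sleep_ms() poll above waits out any read-ahead pass already in
// flight (per the comment, deletion can't proceed while it runs), and join()
// then reclaims the finished thread before the blocks are freed.
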
size_t CachingFileLoader::ReadFromCache(s64 pos, size_t bytes, void *data) {
	s64 cacheStartPos = pos >> BLOCK_SHIFT;
	s64 cacheEndPos = (pos + bytes - 1) >> BLOCK_SHIFT;
	// TODO: Smarter.
	size_t readSize = 0;
	size_t offset = (size_t)(pos - (cacheStartPos << BLOCK_SHIFT));
	u8 *p = (u8 *)data;

	std::lock_guard<std::recursive_mutex> guard(blocksMutex_);
	for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
		auto block = blocks_.find(i);
		if (block == blocks_.end()) {
			return readSize;
		}
		block->second.generation = generation_;

		size_t toRead = std::min(bytes - readSize, (size_t)BLOCK_SIZE - offset);
		memcpy(p + readSize, block->second.ptr + offset, toRead);
		readSize += toRead;

		// Don't need an offset after the first read.
		offset = 0;
	}
	return readSize;
}

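// Note: ReadFromCache() above stops at the first missing block and reports
// how far it got; ReadAt() fills the gap via SaveIntoCache() and retries, so
// a partially cached range resolves without re-copying earlier blocks.
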
void CachingFileLoader::SaveIntoCache(s64 pos, size_t bytes, Flags flags, bool readingAhead) {
	s64 cacheStartPos = pos >> BLOCK_SHIFT;
	s64 cacheEndPos = (pos + bytes - 1) >> BLOCK_SHIFT;

	std::lock_guard<std::recursive_mutex> guard(blocksMutex_);
	size_t blocksToRead = 0;
	for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
		auto block = blocks_.find(i);
		if (block != blocks_.end()) {
			break;
		}
		++blocksToRead;
		if (blocksToRead >= MAX_BLOCKS_PER_READ) {
			break;
		}
	}

	if (!MakeCacheSpaceFor(blocksToRead, readingAhead) || blocksToRead == 0) {
		return;
	}

	if (blocksToRead == 1) {
		blocksMutex_.unlock();

		u8 *buf = new u8[BLOCK_SIZE];
		backend_->ReadAt(cacheStartPos << BLOCK_SHIFT, BLOCK_SIZE, buf, flags);

		blocksMutex_.lock();
		// While blocksMutex_ was unlocked, another thread may have read.
		// If so, free the one we just read.
		if (blocks_.find(cacheStartPos) == blocks_.end()) {
			blocks_[cacheStartPos] = BlockInfo(buf);
		} else {
			delete [] buf;
		}
	} else {
		blocksMutex_.unlock();

		u8 *wholeRead = new u8[blocksToRead << BLOCK_SHIFT];
		backend_->ReadAt(cacheStartPos << BLOCK_SHIFT, blocksToRead << BLOCK_SHIFT, wholeRead, flags);

		blocksMutex_.lock();
		for (size_t i = 0; i < blocksToRead; ++i) {
			if (blocks_.find(cacheStartPos + i) != blocks_.end()) {
				// Written while we were busy, just skip it. Keep the existing block.
				continue;
			}
			u8 *buf = new u8[BLOCK_SIZE];
			memcpy(buf, wholeRead + (i << BLOCK_SHIFT), BLOCK_SIZE);
			blocks_[cacheStartPos + i] = BlockInfo(buf);
		}
		delete[] wholeRead;
	}

	cacheSize_ += blocksToRead;
	++generation_;
}

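// Note on the manual unlock()/lock() pairs above: the blocking backend read
// runs with blocksMutex_ released so other readers aren't stalled. The
// enclosing lock_guard still performs its own unlock at scope exit; since the
// mutex is recursive, the acquire/release counts stay balanced.
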
bool CachingFileLoader::MakeCacheSpaceFor(size_t blocks, bool readingAhead) {
	size_t goal = MAX_BLOCKS_CACHED - blocks;

	if (readingAhead && cacheSize_ > goal) {
		return false;
	}

	std::lock_guard<std::recursive_mutex> guard(blocksMutex_);
	while (cacheSize_ > goal) {
		u64 minGeneration = generation_;

		// We increment the iterator inside because we delete things inside.
		for (auto it = blocks_.begin(); it != blocks_.end(); ) {
			// Check for the minimum seen generation.
			// TODO: Do this smarter?
			if (it->second.generation != 0 && it->second.generation < minGeneration) {
				minGeneration = it->second.generation;
			}

			// 0 means it was never used yet or was the first read (e.g. block descriptor.)
			if (it->second.generation == oldestGeneration_ || it->second.generation == 0) {
				s64 pos = it->first;
				// The block was allocated with new[], so free it with delete [].
				delete [] it->second.ptr;
				blocks_.erase(it);
				--cacheSize_;

				// Our iterator is invalid now. Keep going?
				if (cacheSize_ > goal) {
					// This finds the one at that position.
					it = blocks_.lower_bound(pos);
				} else {
					break;
				}
			} else {
				++it;
			}
		}

		// If we didn't find any, update to the lowest we did find.
		oldestGeneration_ = minGeneration;
	}

	return true;
}

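// Note: eviction above is generational rather than strict LRU. Blocks whose
// generation equals oldestGeneration_ (or 0, i.e. never touched again after
// the initial read) are freed first, and oldestGeneration_ then advances to
// the minimum generation still present, so repeated passes converge on the
// least-recently-used blocks.
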
void CachingFileLoader::StartReadAhead(s64 pos) {
	std::lock_guard<std::recursive_mutex> guard(blocksMutex_);
	if (aheadThreadRunning_) {
		// Already going.
		return;
	}
	if (cacheSize_ + BLOCK_READAHEAD > MAX_BLOCKS_CACHED) {
		// Not enough space to readahead.
		return;
	}

	aheadThreadRunning_ = true;
	if (aheadThread_.joinable())
		aheadThread_.join();
	aheadThread_ = std::thread([this, pos] {
		SetCurrentThreadName("FileLoaderReadAhead");

		AndroidJNIThreadContext jniContext;

		std::unique_lock<std::recursive_mutex> guard(blocksMutex_);
		s64 cacheStartPos = pos >> BLOCK_SHIFT;
		s64 cacheEndPos = cacheStartPos + BLOCK_READAHEAD - 1;

		for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
			auto block = blocks_.find(i);
			if (block == blocks_.end()) {
				guard.unlock();
				SaveIntoCache(i << BLOCK_SHIFT, BLOCK_SIZE * BLOCK_READAHEAD, Flags::NONE, true);
				break;
			}
		}

		aheadThreadRunning_ = false;
	});
}
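
// Note: at most one read-ahead thread exists at a time (guarded by
// aheadThreadRunning_), and any finished predecessor is join()ed before the
// std::thread handle is reassigned, which would otherwise call std::terminate.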