CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
hrydgard

CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!

GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/FileLoaders/RamCachingFileLoader.cpp
Views: 1401
1
// Copyright (c) 2015- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
#include <algorithm>
19
#include <thread>
20
#include <cstring>
21
#include <cstdlib>
22
23
#include "Common/Thread/ThreadUtil.h"
24
#include "Common/TimeUtil.h"
25
#include "Core/FileLoaders/RamCachingFileLoader.h"
26
27
#include "Common/Log.h"
28
29
// Takes ownership of backend.
30
RamCachingFileLoader::RamCachingFileLoader(FileLoader *backend)
31
: ProxiedFileLoader(backend) {
32
filesize_ = backend->FileSize();
33
if (filesize_ > 0) {
34
InitCache();
35
}
36
}
37
38
RamCachingFileLoader::~RamCachingFileLoader() {
	// A cache (and possibly a read-ahead thread) only exists when the file
	// had a known non-zero size at construction time.
	if (filesize_ > 0)
		ShutdownCache();
}
bool RamCachingFileLoader::Exists() {
45
if (exists_ == -1) {
46
exists_ = ProxiedFileLoader::Exists() ? 1 : 0;
47
}
48
return exists_ == 1;
49
}
50
51
bool RamCachingFileLoader::ExistsFast() {
52
if (exists_ == -1) {
53
return ProxiedFileLoader::ExistsFast();
54
}
55
return exists_ == 1;
56
}
57
58
bool RamCachingFileLoader::IsDirectory() {
59
if (isDirectory_ == -1) {
60
isDirectory_ = ProxiedFileLoader::IsDirectory() ? 1 : 0;
61
}
62
return isDirectory_ == 1;
63
}
64
65
// Returns the size snapshotted from the backend in the constructor; never
// re-queries the backend.
s64 RamCachingFileLoader::FileSize() {
	return filesize_;
}
// Reads `bytes` bytes at `absolutePos`, serving from the RAM cache when
// possible and filling missing cache blocks from the backend on demand.
// Returns the number of bytes actually read.
size_t RamCachingFileLoader::ReadAt(s64 absolutePos, size_t bytes, void *data, Flags flags) {
	// Bypass the cache entirely if it never got allocated, or if the caller
	// explicitly asked for an uncached read.
	if (cache_ == nullptr || (flags & Flags::HINT_UNCACHED) != 0)
		return backend_->ReadAt(absolutePos, bytes, data, flags);

	size_t progress = ReadFromCache(absolutePos, bytes, data);
	// Keep pulling missing blocks into the cache until the request is
	// satisfied, or the cache stops producing data (e.g. at end of file.)
	while (progress < bytes) {
		SaveIntoCache(absolutePos + progress, bytes - progress, flags);
		size_t chunk = ReadFromCache(absolutePos + progress, bytes - progress, (u8 *)data + progress);
		if (chunk == 0) {
			// No forward progress possible, give up.
			break;
		}
		progress += chunk;
	}

	// Hint the background thread to prefetch past what we just read.
	StartReadAhead(absolutePos + progress);
	return progress;
}
void RamCachingFileLoader::InitCache() {
92
std::lock_guard<std::mutex> guard(blocksMutex_);
93
u32 blockCount = (u32)((filesize_ + BLOCK_SIZE - 1) >> BLOCK_SHIFT);
94
// Overallocate for the last block.
95
cache_ = (u8 *)malloc((size_t)blockCount << BLOCK_SHIFT);
96
if (cache_ == nullptr) {
97
return;
98
}
99
aheadRemaining_ = blockCount;
100
blocks_.resize(blockCount);
101
}
102
103
void RamCachingFileLoader::ShutdownCache() {
104
Cancel();
105
106
// We can't delete while the thread is running, so have to wait.
107
// This should only happen from the menu.
108
while (aheadThreadRunning_) {
109
sleep_ms(1);
110
}
111
if (aheadThread_.joinable())
112
aheadThread_.join();
113
114
std::lock_guard<std::mutex> guard(blocksMutex_);
115
blocks_.clear();
116
if (cache_ != nullptr) {
117
free(cache_);
118
cache_ = nullptr;
119
}
120
}
121
122
// Requests the read-ahead thread to stop (if running), then forwards the
// cancel to the wrapped backend loader.
void RamCachingFileLoader::Cancel() {
	if (aheadThreadRunning_) {
		// Set under the blocks mutex so the thread observes it between
		// iterations of its fill loop.
		std::lock_guard<std::mutex> guard(blocksMutex_);
		aheadCancel_ = true;
	}

	ProxiedFileLoader::Cancel();
}
// Copies as much of [pos, pos + bytes) as is already cached into data,
// stopping at the first missing block. Returns the number of bytes copied.
size_t RamCachingFileLoader::ReadFromCache(s64 pos, size_t bytes, void *data) {
	// Which blocks does the request touch?
	const s64 firstBlock = pos >> BLOCK_SHIFT;
	s64 lastBlock = (pos + bytes - 1) >> BLOCK_SHIFT;
	if ((size_t)lastBlock >= blocks_.size()) {
		lastBlock = blocks_.size() - 1;
	}

	// Offset of pos within its first block; zero for every later block.
	size_t skip = (size_t)(pos - (firstBlock << BLOCK_SHIFT));
	u8 *dest = (u8 *)data;
	size_t copied = 0;

	// Never report more than the file actually holds.
	if (pos + (s64)bytes > filesize_) {
		// Should've been caught above, but just in case.
		if (pos >= filesize_) {
			return 0;
		}
		bytes = (size_t)(filesize_ - pos);
	}

	std::lock_guard<std::mutex> guard(blocksMutex_);
	for (s64 block = firstBlock; block <= lastBlock; ++block) {
		// Stop at the first block that hasn't been fetched yet.
		if (blocks_[(size_t)block] == 0)
			break;

		const size_t chunk = std::min(bytes - copied, (size_t)BLOCK_SIZE - skip);
		memcpy(dest + copied, &cache_[(block << BLOCK_SHIFT) + skip], chunk);
		copied += chunk;
		skip = 0;
	}
	return copied;
}
// Fetches up to MAX_BLOCKS_PER_READ blocks covering [pos, pos + bytes) from
// the backend into cache_, then marks the blocks that arrived as present.
// Called both from ReadAt() (foreground) and the read-ahead thread.
void RamCachingFileLoader::SaveIntoCache(s64 pos, size_t bytes, Flags flags) {
	s64 cacheStartPos = pos >> BLOCK_SHIFT;
	s64 cacheEndPos = (pos + bytes - 1) >> BLOCK_SHIFT;
	if ((size_t)cacheEndPos >= blocks_.size()) {
		cacheEndPos = blocks_.size() - 1;
	}

	// Count missing blocks in the range, capped so a single call can't issue
	// an unbounded backend read.
	size_t blocksToRead = 0;
	{
		std::lock_guard<std::mutex> guard(blocksMutex_);
		for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
			if (blocks_[(size_t)i] == 0) {
				++blocksToRead;
				if (blocksToRead >= MAX_BLOCKS_PER_READ) {
					break;
				}
			}
		}
	}

	// NOTE(review): the backend read always starts at cacheStartPos, so if the
	// missing blocks are not contiguous from there, already-present blocks get
	// re-read (with identical data) and later gaps are left for the caller's
	// retry loop to fill. The write into cache_ happens without the lock held;
	// presumably only unmarked blocks are ever written concurrently - confirm.
	s64 cacheFilePos = cacheStartPos << BLOCK_SHIFT;
	size_t bytesRead = backend_->ReadAt(cacheFilePos, blocksToRead << BLOCK_SHIFT, &cache_[cacheFilePos], flags);

	// In case there was an error, let's not mark blocks that failed to read as read.
	u32 blocksActuallyRead = (u32)((bytesRead + BLOCK_SIZE - 1) >> BLOCK_SHIFT);
	{
		std::lock_guard<std::mutex> guard(blocksMutex_);

		// In case they were simultaneously read.
		u32 blocksRead = 0;
		for (size_t i = 0; i < blocksActuallyRead; ++i) {
			if (blocks_[(size_t)cacheStartPos + i] == 0) {
				blocks_[(size_t)cacheStartPos + i] = 1;
				++blocksRead;
			}
		}

		// Keep the read-ahead thread's remaining-work counter in sync.
		if (aheadRemaining_ != 0) {
			aheadRemaining_ -= blocksRead;
		}
	}
}
// Records the position to prefetch from and, if not already running, spins up
// the background thread that fills the remaining cache blocks.
void RamCachingFileLoader::StartReadAhead(s64 pos) {
	if (cache_ == nullptr) {
		// No cache to fill (allocation failed or size was unknown.)
		return;
	}

	std::lock_guard<std::mutex> guard(blocksMutex_);
	aheadPos_ = pos;
	if (aheadThreadRunning_) {
		// Already going.
		return;
	}

	aheadThreadRunning_ = true;
	aheadCancel_ = false;
	// Reap the previous thread object before reusing it.
	if (aheadThread_.joinable())
		aheadThread_.join();
	aheadThread_ = std::thread([this] {
		SetCurrentThreadName("FileLoaderReadAhead");

		// Keeps this thread attached to the JVM on Android for backend I/O.
		AndroidJNIThreadContext jniContext;

		// Loop until everything is cached or a cancel is requested.
		while (aheadRemaining_ != 0 && !aheadCancel_) {
			// Where should we look?
			const u32 cacheStartPos = NextAheadBlock();
			if (cacheStartPos == 0xFFFFFFFF) {
				// Must be full.
				break;
			}
			u32 cacheEndPos = cacheStartPos + BLOCK_READAHEAD - 1;
			if (cacheEndPos >= blocks_.size()) {
				cacheEndPos = (u32)blocks_.size() - 1;
			}

			// Fetch at the first still-missing block in the window;
			// SaveIntoCache() reads up to BLOCK_READAHEAD blocks in one go.
			for (u32 i = cacheStartPos; i <= cacheEndPos; ++i) {
				if (blocks_[i] == 0) {
					SaveIntoCache((u64)i << BLOCK_SHIFT, BLOCK_SIZE * BLOCK_READAHEAD, Flags::NONE);
					break;
				}
			}
		}

		aheadThreadRunning_ = false;
	});
}
// Picks the next uncached block for the read-ahead thread to fetch.
// Returns its block index, or 0xFFFFFFFF when nothing is missing from the
// scan start onward.
u32 RamCachingFileLoader::NextAheadBlock() {
	std::lock_guard<std::mutex> guard(blocksMutex_);

	// Begin scanning at the hint left by the most recent read (aheadPos_),
	// then clear the hint so the next call sweeps from the file's start.
	const u32 startFrom = (u32)(aheadPos_ >> BLOCK_SHIFT);
	aheadPos_ = 0;

	for (u32 block = startFrom; block < blocks_.size(); ++block) {
		if (blocks_[block] == 0)
			return block;
	}

	// Nothing missing between startFrom and the end of the file.
	return 0xFFFFFFFF;
}