GitHub Repository: awilliam/linux-vfio
Path: blob/master/fs/9p/cache.c
/*
 * V9FS cache definitions.
 *
 * Copyright (C) 2009 by Abhishek Kulkarni <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN 11
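/*
 * Note: 11 bytes is enough for a 32-bit jiffies value rendered in decimal
 * (at most ten digits) plus the terminating NUL; scnprintf() in
 * v9fs_random_cachetag() truncates safely if jiffies ever needs more.
 */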

struct fscache_netfs v9fs_cache_netfs = {
        .name           = "9p",
        .version        = 0,
};
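/*
 * This netfs definition is handed to FS-Cache when 9p caching is set up.
 * The registration itself happens outside this file; a minimal sketch of
 * what the init/exit paths are expected to do (illustrative, not verbatim):
 *
 *      ret = fscache_register_netfs(&v9fs_cache_netfs);
 *      if (ret < 0)
 *              return ret;
 *      ...
 *      fscache_unregister_netfs(&v9fs_cache_netfs);
 */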

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *                        with a new cache session.
 *
 * The value of jiffies is used as a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
        v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
        if (!v9ses->cachetag)
                return -ENOMEM;

        return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
                                           void *buffer, uint16_t bufmax)
{
        struct v9fs_session_info *v9ses;
        uint16_t klen = 0;

        v9ses = (struct v9fs_session_info *)cookie_netfs_data;
        P9_DPRINTK(P9_DEBUG_FSC, "session %p buf %p size %u", v9ses,
                   buffer, bufmax);

        if (v9ses->cachetag)
                klen = strlen(v9ses->cachetag);

        if (klen > bufmax)
                return 0;

        memcpy(buffer, v9ses->cachetag, klen);
        P9_DPRINTK(P9_DEBUG_FSC, "cache session tag %s", v9ses->cachetag);
        return klen;
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
        .name           = "9P.session",
        .type           = FSCACHE_COOKIE_TYPE_INDEX,
        .get_key        = v9fs_cache_session_get_key,
};

void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
        /* If no cache session tag was specified, we generate a random one. */
        if (!v9ses->cachetag)
                v9fs_random_cachetag(v9ses);

        v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
                                                &v9fs_cache_session_index_def,
                                                v9ses);
        P9_DPRINTK(P9_DEBUG_FSC, "session %p get cookie %p", v9ses,
                   v9ses->fscache);
}

void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
        P9_DPRINTK(P9_DEBUG_FSC, "session %p put cookie %p", v9ses,
                   v9ses->fscache);
        fscache_relinquish_cookie(v9ses->fscache, 0);
        v9ses->fscache = NULL;
}
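/*
 * Expected usage of the two session-cookie helpers above (a sketch of how
 * the 9p mount code is assumed to drive them when cache=fscache is in
 * effect; the actual callers live outside this file):
 *
 *      v9fs_cache_session_get_cookie(v9ses);   (at session setup)
 *      ...
 *      v9fs_cache_session_put_cookie(v9ses);   (at session teardown)
 */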

static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
                                         void *buffer, uint16_t bufmax)
{
        const struct v9fs_inode *v9inode = cookie_netfs_data;
        memcpy(buffer, &v9inode->fscache_key->path,
               sizeof(v9inode->fscache_key->path));
        P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode,
                   v9inode->fscache_key->path);
        return sizeof(v9inode->fscache_key->path);
}

static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
                                      uint64_t *size)
{
        const struct v9fs_inode *v9inode = cookie_netfs_data;
        *size = i_size_read(&v9inode->vfs_inode);

        P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &v9inode->vfs_inode,
                   *size);
}

static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
                                         void *buffer, uint16_t buflen)
{
        const struct v9fs_inode *v9inode = cookie_netfs_data;
        memcpy(buffer, &v9inode->fscache_key->version,
               sizeof(v9inode->fscache_key->version));
        P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode,
                   v9inode->fscache_key->version);
        return sizeof(v9inode->fscache_key->version);
}

static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
                                            const void *buffer,
                                            uint16_t buflen)
{
        const struct v9fs_inode *v9inode = cookie_netfs_data;

        if (buflen != sizeof(v9inode->fscache_key->version))
                return FSCACHE_CHECKAUX_OBSOLETE;

        if (memcmp(buffer, &v9inode->fscache_key->version,
                   sizeof(v9inode->fscache_key->version)))
                return FSCACHE_CHECKAUX_OBSOLETE;

        return FSCACHE_CHECKAUX_OKAY;
}
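/*
 * Walk the inode's page cache and clear PG_fscache on every page once the
 * cache has told us it no longer backs this inode.
 */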
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
        struct v9fs_inode *v9inode = cookie_netfs_data;
        struct pagevec pvec;
        pgoff_t first;
        int loop, nr_pages;

        pagevec_init(&pvec, 0);
        first = 0;

        for (;;) {
                nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
                                          first,
                                          PAGEVEC_SIZE - pagevec_count(&pvec));
                if (!nr_pages)
                        break;

                for (loop = 0; loop < nr_pages; loop++)
                        ClearPageFsCache(pvec.pages[loop]);

                first = pvec.pages[nr_pages - 1]->index + 1;

                pvec.nr = nr_pages;
                pagevec_release(&pvec);
                cond_resched();
        }
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
        .name           = "9p.inode",
        .type           = FSCACHE_COOKIE_TYPE_DATAFILE,
        .get_key        = v9fs_cache_inode_get_key,
        .get_attr       = v9fs_cache_inode_get_attr,
        .get_aux        = v9fs_cache_inode_get_aux,
        .check_aux      = v9fs_cache_inode_check_aux,
        .now_uncached   = v9fs_cache_inode_now_uncached,
};

void v9fs_cache_inode_get_cookie(struct inode *inode)
{
        struct v9fs_inode *v9inode;
        struct v9fs_session_info *v9ses;

        if (!S_ISREG(inode->i_mode))
                return;

        v9inode = V9FS_I(inode);
        if (v9inode->fscache)
                return;

        v9ses = v9fs_inode2v9ses(inode);
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
                                                  v9inode);

        P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode,
                   v9inode->fscache);
}

void v9fs_cache_inode_put_cookie(struct inode *inode)
{
        struct v9fs_inode *v9inode = V9FS_I(inode);

        if (!v9inode->fscache)
                return;
        P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode,
                   v9inode->fscache);

        fscache_relinquish_cookie(v9inode->fscache, 0);
        v9inode->fscache = NULL;
}

void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
        struct v9fs_inode *v9inode = V9FS_I(inode);

        if (!v9inode->fscache)
                return;
        P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode,
                   v9inode->fscache);

        fscache_relinquish_cookie(v9inode->fscache, 1);
        v9inode->fscache = NULL;
}
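/*
 * Enable or disable caching for an open file: opening for write drops the
 * cookie (cached data could go stale behind the writer), while a read-only
 * open (re)acquires it.
 */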
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_fid *fid;

        if (!v9inode->fscache)
                return;

        spin_lock(&v9inode->fscache_lock);
        fid = filp->private_data;
        if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
                v9fs_cache_inode_flush_cookie(inode);
        else
                v9fs_cache_inode_get_cookie(inode);

        spin_unlock(&v9inode->fscache_lock);
}

void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct v9fs_session_info *v9ses;
        struct fscache_cookie *old;

        if (!v9inode->fscache)
                return;

        old = v9inode->fscache;

        spin_lock(&v9inode->fscache_lock);
        fscache_relinquish_cookie(v9inode->fscache, 1);

        v9ses = v9fs_inode2v9ses(inode);
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
                                                  v9inode);
        P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p",
                   inode, old, v9inode->fscache);

        spin_unlock(&v9inode->fscache_lock);
}
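/*
 * The __v9fs_* helpers below implement the per-page FS-Cache hooks when
 * CONFIG_9P_FSCACHE is enabled; cache.h is expected to provide thin
 * wrappers around them that turn into no-ops when caching is compiled out.
 * (Descriptive note; the wrappers themselves live outside this file.)
 */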
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
        struct inode *inode = page->mapping->host;
        struct v9fs_inode *v9inode = V9FS_I(inode);

        BUG_ON(!v9inode->fscache);

        return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

void __v9fs_fscache_invalidate_page(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct v9fs_inode *v9inode = V9FS_I(inode);

        BUG_ON(!v9inode->fscache);

        if (PageFsCache(page)) {
                fscache_wait_on_page_write(v9inode->fscache, page);
                BUG_ON(!PageLocked(page));
                fscache_uncache_page(v9inode->fscache, page);
        }
}

static void v9fs_vfs_readpage_complete(struct page *page, void *data,
                                       int error)
{
        if (!error)
                SetPageUptodate(page);

        unlock_page(page);
}
/**
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
        int ret;
        const struct v9fs_inode *v9inode = V9FS_I(inode);

        P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
        if (!v9inode->fscache)
                return -ENOBUFS;

        ret = fscache_read_or_alloc_page(v9inode->fscache,
                                         page,
                                         v9fs_vfs_readpage_complete,
                                         NULL,
                                         GFP_KERNEL);
        switch (ret) {
        case -ENOBUFS:
        case -ENODATA:
                P9_DPRINTK(P9_DEBUG_FSC, "page/inode not in cache %d", ret);
                return 1;
        case 0:
                P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
                return ret;
        default:
                P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
                return ret;
        }
}
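/*
 * Sketch of how a ->readpage() implementation is assumed to use the helper
 * above together with __v9fs_readpage_to_fscache() below (illustrative only;
 * the real caller lives in the 9p address_space code, not in this file):
 *
 *      ret = v9fs_readpage_from_fscache(inode, page);
 *      if (ret == 0)
 *              return ret;              (cache read submitted)
 *      ... read the page from the server over 9P ...
 *      SetPageUptodate(page);
 *      v9fs_readpage_to_fscache(inode, page);
 */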

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages)
{
        int ret;
        const struct v9fs_inode *v9inode = V9FS_I(inode);

        P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages);
        if (!v9inode->fscache)
                return -ENOBUFS;

        ret = fscache_read_or_alloc_pages(v9inode->fscache,
                                          mapping, pages, nr_pages,
                                          v9fs_vfs_readpage_complete,
                                          NULL,
                                          mapping_gfp_mask(mapping));
        switch (ret) {
        case -ENOBUFS:
        case -ENODATA:
                P9_DPRINTK(P9_DEBUG_FSC, "pages/inodes not in cache %d", ret);
                return 1;
        case 0:
                BUG_ON(!list_empty(pages));
                BUG_ON(*nr_pages != 0);
                P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
                return ret;
        default:
                P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
                return ret;
        }
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 *
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
        int ret;
        const struct v9fs_inode *v9inode = V9FS_I(inode);

        P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
        ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
        P9_DPRINTK(P9_DEBUG_FSC, "ret = %d", ret);
        if (ret != 0)
                v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
        const struct v9fs_inode *v9inode = V9FS_I(inode);
        P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
        if (PageFsCache(page))
                fscache_wait_on_page_write(v9inode->fscache, page);
}