GitHub Repository: freebsd/freebsd-src
Path: blob/main/tests/sys/fs/fusefs/read.cc
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <aio.h>
#include <fcntl.h>
#include <semaphore.h>
#include <setjmp.h>
#include <signal.h>
#include <unistd.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

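/* Base fixture for read tests: expect_lookup() expects a regular 0644 file */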
class Read: public FuseTest {

public:
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}
};

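/* Fixture for tests that run against a read-only mount */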
class RofsRead: public Read {
public:
virtual void SetUp() {
	m_ro = true;
	Read::SetUp();
}
};

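/* Fixture for tests that negotiate the older FUSE protocol version 7.8 */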
class Read_7_8: public FuseTest {
public:
virtual void SetUp() {
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}
};

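/*
 * Fixture for POSIX AIO tests. They are skipped unless unsafe AIO is
 * enabled, e.g. by "sysctl vfs.aio.enable_unsafe=1".
 */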
class AioRead: public Read {
public:
virtual void SetUp() {
	if (!is_unsafe_aio_enabled())
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
	FuseTest::SetUp();
}
};

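/*
 * Like AioRead, but negotiates FUSE_ASYNC_READ so that multiple reads may be
 * outstanding at once.
 */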
class AsyncRead: public AioRead {
	virtual void SetUp() {
		m_init_flags = FUSE_ASYNC_READ;
		AioRead::SetUp();
	}
};

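/*
 * Parameterized fixture for readahead tests. The parameters are
 * <noclusterr, readahead multiplier>; maxreadahead is the multiplier times
 * vfs.maxbcachebuf.
 */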
class ReadAhead: public Read,
		 public WithParamInterface<tuple<bool, int>>
{
	virtual void SetUp() {
		int val;
		const char *node = "vfs.maxbcachebuf";
		size_t size = sizeof(val);
		ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
			<< strerror(errno);

		m_maxreadahead = val * get<1>(GetParam());
		m_noclusterr = get<0>(GetParam());
		Read::SetUp();
	}
};

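/* Fixture for a mount with max_read=16384 */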
class ReadMaxRead: public Read {
	virtual void SetUp() {
		m_maxread = 16384;
		Read::SetUp();
	}
};

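/* Fixture for a mount with MNT_NOATIME */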
class ReadNoatime: public Read {
	virtual void SetUp() {
		m_noatime = true;
		Read::SetUp();
	}
};

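/*
 * Fixture for tests that expect a SIGBUS when touching a failed mapping.
 * TearDown restores the default SIGBUS handler.
 */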
class ReadSigbus: public Read
{
public:
static jmp_buf s_jmpbuf;
static void *s_si_addr;

void TearDown() {
	struct sigaction sa;

	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigaction(SIGBUS, &sa, NULL);

	FuseTest::TearDown();
}

};

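/* Record the faulting address and longjmp back into the test */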
static void
handle_sigbus(int signo __unused, siginfo_t *info, void *uap __unused) {
	ReadSigbus::s_si_addr = info->si_addr;
	longjmp(ReadSigbus::s_jmpbuf, 1);
}

jmp_buf ReadSigbus::s_jmpbuf;
void *ReadSigbus::s_si_addr;

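/* Parameterized on log2 of fuse_init_out.time_gran */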
class TimeGran: public Read, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	m_time_gran = 1 << GetParam();
	Read::SetUp();
}
};

/* AIO reads need to set the header's pid field correctly */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
TEST_F(AioRead, aio_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	struct aiocb iocb, *piocb;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	iocb.aio_nbytes = bufsize;
	iocb.aio_fildes = fd;
	iocb.aio_buf = buf;
	iocb.aio_offset = 0;
	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

/*
 * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
 * is at most one outstanding read operation per file handle
 */
TEST_F(AioRead, async_read_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	off_t off1 = m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	volatile sig_atomic_t read_count = 0;

	expect_lookup(RELPATH, ino, 131072);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, and respond to neither. If the
	 * filesystem ever gets the second read request, then we failed to
	 * limit outstanding reads.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/*
	 * Sleep for a while to make sure the kernel has had a chance to issue
	 * the second read, even though the first has not yet returned
	 */
	nap();
	EXPECT_EQ(read_count, 1);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}

/*
 * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
 * simultaneous read requests on the same file handle.
 */
TEST_F(AsyncRead, async_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	off_t off1 = m_maxbcachebuf;
	off_t fsize = 2 * m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	sem_t sem;

	ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, but respond to neither. Ensure that
	 * we received both.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/* Wait until both reads have reached the daemon */
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}

/* The kernel should update the cached atime attribute during a read */
TEST_F(Read, atime)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	/* Ensure atime will be different than it was during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should automatically update atime during read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/* The kernel should update the cached atime attribute during a cached read */
TEST_F(Read, atime_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	/* Ensure atime will be different than it was during the first read */
	nap();

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should automatically update atime during read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/* dirty atime values should be flushed during close */
TEST_F(Read, atime_during_close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				(time_t)in.body.setattr.atime ==
					sb.st_atim.tv_sec &&
				(long)in.body.setattr.atimensec ==
					sb.st_atim.tv_nsec);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));

	close(fd);
}

/*
 * When not using -o default_permissions, the daemon may make its own decisions
 * regarding access permissions, and these may be unpredictable. If it rejects
 * our attempt to set atime, that should not cause close(2) to fail.
 */
TEST_F(Read, atime_during_close_eacces)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EACCES)));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);

	ASSERT_EQ(0, close(fd));
}

/* A cached atime should be flushed during FUSE_SETATTR */
TEST_F(Read, atime_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				(time_t)in.body.setattr.atime ==
					sb.st_atim.tv_sec &&
				(long)in.body.setattr.atimensec ==
					sb.st_atim.tv_nsec);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

/* 0-length reads shouldn't cause any confusion */
TEST_F(Read, direct_io_read_nothing)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	char buf[80];

	expect_lookup(RELPATH, ino, offset + 1000);
	expect_open(ino, FOPEN_DIRECT_IO, 1);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno);
	leak(fd);
}

/*
 * With direct_io, reads should not fill the cache. They should go straight to
 * the daemon
 */
TEST_F(Read, direct_io_pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// With FOPEN_DIRECT_IO, the cache should be bypassed. The server will
	// get a 2nd read request.
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}

/*
 * With direct_io, filesystems are allowed to return less data than is
 * requested. fuse(4) should return a short read to userland.
 */
TEST_F(Read, direct_io_short_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize));
	leak(fd);
}

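/* If the server fails a read, read(2) should return its error */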
TEST_F(Read, eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(EIO, errno);
	leak(fd);
}

/*
 * If the server returns a short read when direct io is not in use, that
 * indicates EOF, because of a server-side truncation. We should invalidate
 * all cached attributes. We may also update the file size.
 */
TEST_F(Read, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t partbufsize = 3 * bufsize / 4;
	ssize_t r;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
	expect_getattr(ino, offset + partbufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	r = pread(fd, buf, bufsize, offset);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(partbufsize, r) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
	leak(fd);
}

/* Like Read.eof, but causes an entire buffer to be invalidated */
TEST_F(Read, eof_of_whole_buffer)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, old_filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
	expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
	expect_getattr(ino, m_maxbcachebuf);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Cache the third block */
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
		<< strerror(errno);
	/* Try to read the 2nd block, but it's past EOF */
	ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
		<< strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
	leak(fd);
}

/*
 * With the keep_cache option, the kernel may keep its read cache across
 * multiple open(2)s.
 */
TEST_F(Read, keep_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, FOPEN_KEEP_CACHE, 2);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should be serviced by cache, even though it's on the other
	 * file descriptor
	 */
	ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}

/*
 * Without the keep_cache option, the kernel should drop its read caches on
 * every open
 */
TEST_F(Read, keep_cache_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, 0, 2);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should not be serviced by cache, even though it's on the
	 * original file descriptor
	 */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}

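/* A read through an mmap()ed region should be satisfied by FUSE_READ */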
TEST_F(Read, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/* When max_read is set, large reads will be split up as necessary */
TEST_F(ReadMaxRead, split)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 65536;
	ssize_t fragsize = bufsize / 4;
	char *rbuf, *frag0, *frag1, *frag2, *frag3;

	rbuf = new char[bufsize]();
	frag0 = new char[fragsize]();
	frag1 = new char[fragsize]();
	frag2 = new char[fragsize]();
	frag3 = new char[fragsize]();
	memset(frag0, '0', fragsize);
	memset(frag1, '1', fragsize);
	memset(frag2, '2', fragsize);
	memset(frag3, '3', fragsize);

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, fragsize, fragsize, frag0);
	expect_read(ino, fragsize, fragsize, fragsize, frag1);
	expect_read(ino, 2 * fragsize, fragsize, fragsize, frag2);
	expect_read(ino, 3 * fragsize, fragsize, fragsize, frag3);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(rbuf, frag0, fragsize));
	ASSERT_EQ(0, memcmp(rbuf + fragsize, frag1, fragsize));
	ASSERT_EQ(0, memcmp(rbuf + 2 * fragsize, frag2, fragsize));
	ASSERT_EQ(0, memcmp(rbuf + 3 * fragsize, frag3, fragsize));

	delete[] frag3;
	delete[] frag2;
	delete[] frag1;
	delete[] frag0;
	delete[] rbuf;
	leak(fd);
}

/*
 * The kernel should not update the cached atime attribute during a read, if
 * MNT_NOATIME is used.
 */
TEST_F(ReadNoatime, atime)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should not update atime during read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/*
 * The kernel should not update the cached atime attribute during a cached
 * read, if MNT_NOATIME is used.
 */
TEST_F(ReadNoatime, atime_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	nap();

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

962
/* The kernel should automatically update atime during read */
963
EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
964
EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
965
EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));
966
967
leak(fd);
968
}
969
970
/* Read of an mmap()ed file fails */
971
TEST_F(ReadSigbus, mmap_eio)
972
{
973
const char FULLPATH[] = "mountpoint/some_file.txt";
974
const char RELPATH[] = "some_file.txt";
975
const char *CONTENTS = "abcdefgh";
976
struct sigaction sa;
977
uint64_t ino = 42;
978
int fd;
979
ssize_t len;
980
size_t bufsize = strlen(CONTENTS);
981
void *p;
982
983
len = getpagesize();
984
985
expect_lookup(RELPATH, ino, bufsize);
986
expect_open(ino, 0, 1);
987
EXPECT_CALL(*m_mock, process(
988
ResultOf([=](auto in) {
989
return (in.header.opcode == FUSE_READ &&
990
in.header.nodeid == ino &&
991
in.body.read.fh == Read::FH);
992
}, Eq(true)),
993
_)
994
).WillRepeatedly(Invoke(ReturnErrno(EIO)));
995
996
fd = open(FULLPATH, O_RDONLY);
997
ASSERT_LE(0, fd) << strerror(errno);
998
999
p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
1000
ASSERT_NE(MAP_FAILED, p) << strerror(errno);
1001
1002
/* Accessing the mapped page should return SIGBUS. */
1003
1004
bzero(&sa, sizeof(sa));
1005
sa.sa_handler = SIG_DFL;
1006
sa.sa_sigaction = handle_sigbus;
1007
sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
1008
ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
1009
if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
1010
atomic_signal_fence(std::memory_order::memory_order_seq_cst);
1011
volatile char x __unused = *(volatile char*)p;
1012
FAIL() << "shouldn't get here";
1013
}
1014
1015
ASSERT_EQ(p, ReadSigbus::s_si_addr);
1016
ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
1017
leak(fd);
1018
}
1019
/*
 * A read via mmap comes up short, indicating that the file was truncated
 * server-side.
 */
TEST_F(Read, mmap_eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	struct stat sb;
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, m_maxbcachebuf);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == (uint32_t)m_maxbcachebuf);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));
	expect_getattr(ino, bufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* The file size should be automatically truncated */
	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ((off_t)bufsize, sb.st_size);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/*
 * During VOP_GETPAGES, the FUSE server fails a FUSE_GETATTR operation. This
 * almost certainly indicates a buggy FUSE server, and our goal should be not
 * to panic. Instead, generate SIGBUS.
 */
TEST_F(ReadSigbus, mmap_getblksz_fail)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct sigaction sa;
	Sequence seq;
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	mode_t mode = S_IFREG | 0644;
	void *p;

	len = getpagesize();

	FuseTest::expect_lookup(RELPATH, ino, mode, bufsize, 1, 0);
	/* Expect two GETATTR calls that succeed, followed by one that fails. */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(2)
	.InSequence(seq)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr_valid = 0;
	})));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).InSequence(seq)
	.WillRepeatedly(Invoke(ReturnErrno(EIO)));
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Accessing the mapped page should return SIGBUS. */
	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sa.sa_sigaction = handle_sigbus;
	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
		volatile char x __unused = *(volatile char*)p;
		FAIL() << "shouldn't get here";
	}

	ASSERT_EQ(p, ReadSigbus::s_si_addr);
	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/*
 * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass
 * the cache and go straight to the daemon
 */
TEST_F(Read, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	// Fill the cache
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// Reads with o_direct should bypass the cache
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

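/* pread(2) should issue a FUSE_READ at the requested offset */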
TEST_F(Read, pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	/*
	 * Set offset to a maxbcachebuf boundary so we'll be sure what offset
	 * to read from. Without this, the read might start at a lower offset.
	 */
	uint64_t offset = m_maxbcachebuf;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}

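/* The simplest case: read(2) returns the file's contents */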
TEST_F(Read, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

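/* Like Read.read, but using protocol version 7.8 */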
TEST_F(Read_7_8, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

/*
 * If caching is enabled, the kernel should try to read an entire cache block
 * at a time.
 */
TEST_F(Read, cache_block)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 8;
	ssize_t filesize = m_maxbcachebuf * 2;
	char *contents;
	char buf[bufsize];
	const char *contents1 = CONTENTS0 + bufsize;

	contents = new char[filesize]();
	memmove(contents, CONTENTS0, strlen(CONTENTS0));

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
		contents);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));

	/* A subsequent read should be serviced by cache */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
	leak(fd);
	delete[] contents;
}

/* Reading with sendfile should work (though it obviously won't be 0-copy) */
TEST_F(Read, sendfile)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	size_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
		<< strerror(errno);
	ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}

/* sendfile should fail gracefully if fuse declines the read */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
TEST_F(Read, sendfile_eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}

/*
 * Sequential reads should use readahead. And if allowed, large reads should
 * be clustered.
 */
TEST_P(ReadAhead, readahead) {
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd, maxcontig, clustersize;
	ssize_t bufsize = 4 * m_maxbcachebuf;
	ssize_t filesize = bufsize;
	uint64_t len;
	char *rbuf, *contents;
	off_t offs;

	contents = new char[filesize];
	memset(contents, 'X', filesize);
	rbuf = new char[bufsize]();

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	maxcontig = m_noclusterr ? m_maxbcachebuf :
		m_maxbcachebuf + m_maxreadahead;
	clustersize = MIN((unsigned long )maxcontig, m_maxphys);
	for (offs = 0; offs < bufsize; offs += clustersize) {
		len = std::min((size_t)clustersize, (size_t)(filesize - offs));
		expect_read(ino, offs, len, len, contents + offs);
	}

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Set the internal readahead counter to a "large" value */
	ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));

	leak(fd);
	delete[] rbuf;
	delete[] contents;
}

INSTANTIATE_TEST_SUITE_P(RA, ReadAhead,
	Values(tuple<bool, int>(false, 0),
	       tuple<bool, int>(false, 1),
	       tuple<bool, int>(false, 2),
	       tuple<bool, int>(false, 3),
	       tuple<bool, int>(true, 0),
	       tuple<bool, int>(true, 1),
	       tuple<bool, int>(true, 2)));

/* With read-only mounts, fuse should never update atime during close */
TEST_F(RofsRead, atime_during_close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).Times(0);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);

	close(fd);
}

/* fuse_init_out.time_gran controls the granularity of timestamps */
TEST_P(TimeGran, atime_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				in.body.setattr.atimensec % m_time_gran == 0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

INSTANTIATE_TEST_SUITE_P(TG, TimeGran, Range(0u, 10u));